From: Iustin Pop
Date: Tue, 12 Jan 2010 10:24:03 +0000 (+0100)
Subject: Merge branch 'devel-2.1' into stable-2.1
X-Git-Tag: v2.1.0rc3~23
X-Git-Url: https://code.grnet.gr/git/ganeti-local/commitdiff_plain/b13dfb92ab93b63c3feb238d27e8b5cd2a495fce?hp=b8313b298dc11f189ab3da6bf625ce1e1350cfb4

Merge branch 'devel-2.1' into stable-2.1
---
diff --git a/Makefile.am b/Makefile.am
index 8aa6430..75d5387 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -411,6 +411,10 @@ lib/_autoconf.py: Makefile stamp-directories
 	echo ''; \
 	echo '"""'; \
 	echo ''; \
+	echo '# pylint: disable-msg=C0301,C0324'; \
+	echo '# because this is autogenerated, we do not want'; \
+	echo '# style warnings' ; \
+	echo ''; \
 	echo "PACKAGE_VERSION = '$(PACKAGE_VERSION)'"; \
 	echo "VERSION_MAJOR = '$(VERSION_MAJOR)'"; \
 	echo "VERSION_MINOR = '$(VERSION_MINOR)'"; \
@@ -469,6 +473,10 @@ ganeti:
 check-local:
 	$(CHECK_PYTHON_CODE) $(check_python_code)

+.PHONY: lint
+lint: ganeti
+	pylint $(LINT_OPTS) ganeti $(dist_sbin_SCRIPTS) $(dist_tools_SCRIPTS)
+
 # a dist hook rule for catching revision control directories
 distcheck-hook:
 	if find $(top_distdir) -name .svn -or -name .git | grep .; then \
diff --git a/daemons/ganeti-confd b/daemons/ganeti-confd
index c560e7f..b7198d0 100755
--- a/daemons/ganeti-confd
+++ b/daemons/ganeti-confd
@@ -26,12 +26,20 @@ It uses UDP+HMAC for authentication with a global cluster key.

 """

+# pylint: disable-msg=C0103
+# C0103: Invalid name ganeti-confd
+
 import os
 import sys
 import logging
-import pyinotify
 import time

+try:
+  # pylint: disable-msg=E0611
+  from pyinotify import pyinotify
+except ImportError:
+  import pyinotify
+
 from optparse import OptionParser

 from ganeti import asyncnotifier
@@ -40,7 +48,6 @@ from ganeti.confd import server as confd_server
 from ganeti import constants
 from ganeti import errors
 from ganeti import daemon
-from ganeti import ssconf


 class ConfdAsyncUDPServer(daemon.AsyncUDPSocket):
@@ -63,7 +70,7 @@ class ConfdAsyncUDPServer(daemon.AsyncUDPSocket):
     self.port = port
     self.processor = processor
     self.bind((bind_address, port))
-    logging.debug("listening on ('%s':%d)" % (bind_address, port))
+    logging.debug("listening on ('%s':%d)", bind_address, port)

   # this method is overriding a daemon.AsyncUDPSocket method
   def handle_datagram(self, payload_in, ip, port):
@@ -84,23 +91,25 @@ class ConfdAsyncUDPServer(daemon.AsyncUDPSocket):
 class ConfdInotifyEventHandler(pyinotify.ProcessEvent):

   def __init__(self, watch_manager, callback,
-               file=constants.CLUSTER_CONF_FILE):
+               filename=constants.CLUSTER_CONF_FILE):
     """Constructor for ConfdInotifyEventHandler

     @type watch_manager: L{pyinotify.WatchManager}
     @param watch_manager: ganeti-confd inotify watch manager
     @type callback: function accepting a boolean
     @param callback: function to call when an inotify event happens
-    @type file: string
-    @param file: config file to watch
+    @type filename: string
+    @param filename: config file to watch

     """
     # no need to call the parent's constructor
     self.watch_manager = watch_manager
     self.callback = callback
+    # pylint: disable-msg=E1103
+    # pylint for some reason doesn't see the below constants
     self.mask = pyinotify.EventsCodes.IN_IGNORED | \
       pyinotify.EventsCodes.IN_MODIFY
-    self.file = file
+    self.file = filename
     self.watch_handle = None

   def enable(self):
@@ -130,7 +139,7 @@ class ConfdInotifyEventHandler(pyinotify.ProcessEvent):
     # IN_IGNORED event from inotify, because of the file removal (which is
     # contextual with the replacement). In such a case we need to create
     # another watcher for the "new" file.
-    logging.debug("Received 'ignored' inotify event for %s" % event.path)
+    logging.debug("Received 'ignored' inotify event for %s", event.path)
     self.watch_handle = None

     try:
@@ -140,7 +149,7 @@ class ConfdInotifyEventHandler(pyinotify.ProcessEvent):
       # going to realod the file after setting up the new watch.
       self.callback(False)
     except errors.ConfdFatalError, err:
-      logging.critical("Critical error, shutting down: %s" % err)
+      logging.critical("Critical error, shutting down: %s", err)
       sys.exit(constants.EXIT_FAILURE)
     except:
       # we need to catch any exception here, log it, but proceed, because even
@@ -153,12 +162,12 @@ class ConfdInotifyEventHandler(pyinotify.ProcessEvent):
     # usually happen in Ganeti, as the config file is normally replaced by a
     # new one, at filesystem level, rather than actually modified (see
     # utils.WriteFile)
-    logging.debug("Received 'modify' inotify event for %s" % event.path)
+    logging.debug("Received 'modify' inotify event for %s", event.path)

     try:
       self.callback(True)
     except errors.ConfdFatalError, err:
-      logging.critical("Critical error, shutting down: %s" % err)
+      logging.critical("Critical error, shutting down: %s", err)
       sys.exit(constants.EXIT_FAILURE)
     except:
       # we need to catch any exception here, log it, but proceed, because even
@@ -167,7 +176,7 @@ class ConfdInotifyEventHandler(pyinotify.ProcessEvent):
     logging.error("Unexpected exception", exc_info=True)

   def process_default(self, event):
-    logging.error("Received unhandled inotify event: %s" % event)
+    logging.error("Received unhandled inotify event: %s", event)


 class ConfdConfigurationReloader(object):
@@ -315,10 +324,14 @@ class ConfdConfigurationReloader(object):
     self._ResetTimer()


-def CheckConfd(options, args):
+def CheckConfd(_, args):
   """Initial checks whether to run exit with a failure.

   """
+  if args: # confd doesn't take any arguments
+    print >> sys.stderr, ("Usage: %s [-f] [-d] [-b ADDRESS]" % sys.argv[0])
+    sys.exit(constants.EXIT_FAILURE)
+
   # TODO: collapse HMAC daemons handling in daemons GenericMain, when we'll
   # have more than one.
   if not os.path.isfile(constants.HMAC_CLUSTER_KEY):
@@ -326,10 +339,13 @@ def CheckConfd(options, args):
     sys.exit(constants.EXIT_FAILURE)


-def ExecConfd(options, args):
+def ExecConfd(options, _):
   """Main confd function, executed with PID file held

   """
+  # TODO: clarify how the server and reloader variables work (they are
+  # not used)
+  # pylint: disable-msg=W0612
   mainloop = daemon.Mainloop()

   # Asyncronous confd UDP server
@@ -340,7 +356,7 @@ def ExecConfd(options, args):
     # If enabling the processor has failed, we can still go on, but confd will
     # be disabled
     logging.warning("Confd is starting in disabled mode")
-    pass
+
   server = ConfdAsyncUDPServer(options.bind_address, options.port, processor)

   # Configuration reloader
diff --git a/daemons/ganeti-masterd b/daemons/ganeti-masterd
index 3a77be4..63471d9 100755
--- a/daemons/ganeti-masterd
+++ b/daemons/ganeti-masterd
@@ -26,6 +26,8 @@ inheritance from parent classes requires it.

 """

+# pylint: disable-msg=C0103
+# C0103: Invalid name ganeti-masterd

 import os
 import sys
@@ -61,6 +63,7 @@ EXIT_NODESETUP_ERROR = constants.EXIT_NODESETUP_ERROR


 class ClientRequestWorker(workerpool.BaseWorker):
+  # pylint: disable-msg=W0221
   def RunTask(self, server, request, client_address):
     """Process the request.
@@ -70,7 +73,7 @@ class ClientRequestWorker(workerpool.BaseWorker):
     try:
       server.finish_request(request, client_address)
       server.close_request(request)
-    except:
+    except: # pylint: disable-msg=W0702
       server.handle_error(request, client_address)
       server.close_request(request)

@@ -108,7 +111,7 @@ class IOServer(SocketServer.UnixStreamServer):
     self.request_workers.AddTask(self, request, client_address)

   @utils.SignalHandled([signal.SIGINT, signal.SIGTERM])
-  def serve_forever(self, signal_handlers=None):
+  def serve_forever(self, signal_handlers=None): # pylint: disable-msg=W0221
     """Handle one request at a time until told to quit."""
     assert isinstance(signal_handlers, dict) and \
            len(signal_handlers) > 0, \
@@ -141,6 +144,8 @@ class ClientRqHandler(SocketServer.BaseRequestHandler):
   READ_SIZE = 4096

   def setup(self):
+    # pylint: disable-msg=W0201
+    # setup() is the api for initialising for this class
     self._buffer = ""
     self._msgs = collections.deque()
     self._ops = ClientOps(self.server)
@@ -204,11 +209,13 @@ class ClientOps:
   def __init__(self, server):
     self.server = server

-  def handle_request(self, method, args):
+  def handle_request(self, method, args): # pylint: disable-msg=R0911
     queue = self.server.context.jobqueue

     # TODO: Parameter validation

+    # TODO: Rewrite to not exit in each 'if/elif' branch
+
     if method == luxi.REQ_SUBMIT_JOB:
       logging.info("Received new job")
       ops = [opcodes.OpCode.LoadOpCode(state) for state in args]
@@ -292,6 +299,12 @@ class ClientOps:
       op = opcodes.OpQueryClusterInfo()
       return self._Query(op)

+    elif method == luxi.REQ_QUERY_TAGS:
+      kind, name = args
+      logging.info("Received tags query request")
+      op = opcodes.OpGetTags(kind=kind, name=name)
+      return self._Query(op)
+
     elif method == luxi.REQ_QUEUE_SET_DRAIN_FLAG:
       drain_flag = args
       logging.info("Received queue drain flag change request to %s",
@@ -333,6 +346,8 @@ class GanetiContext(object):
   This class creates and holds common objects shared by all threads.

   """
+  # pylint: disable-msg=W0212
+  # we do want to ensure a singleton here
   _instance = None

   def __init__(self):
@@ -498,12 +513,12 @@ def _RunInSeparateProcess(fn):
       # Call function
       result = int(bool(fn()))
       assert result in (0, 1)
-    except:
+    except: # pylint: disable-msg=W0702
       logging.exception("Error while calling function in separate process")
       # 0 and 1 are reserved for the return value
       result = 33

-    os._exit(result)
+    os._exit(result) # pylint: disable-msg=W0212

   # Parent process

@@ -529,6 +544,10 @@ def CheckMasterd(options, args):
   """Initial checks whether to run or exit with a failure.

   """
+  if args: # masterd doesn't take any arguments
+    print >> sys.stderr, ("Usage: %s [-f] [-d]" % sys.argv[0])
+    sys.exit(constants.EXIT_FAILURE)
+
   ssconf.CheckMaster(options.debug)

   # If CheckMaster didn't fail we believe we are the master, but we have to
@@ -544,7 +563,7 @@ def CheckMasterd(options, args):
       confirmation = sys.stdin.readline().strip()

       if confirmation != "YES":
-        print >>sys.stderr, "Aborting."
+        print >> sys.stderr, "Aborting."
         sys.exit(constants.EXIT_FAILURE)

       return
@@ -555,7 +574,7 @@ def CheckMasterd(options, args):
     sys.exit(constants.EXIT_FAILURE)


-def ExecMasterd (options, args):
+def ExecMasterd (options, args): # pylint: disable-msg=W0613
   """Main master daemon function, executed with the PID file held.
""" diff --git a/daemons/ganeti-noded b/daemons/ganeti-noded index 0486dd1..fce0d29 100755 --- a/daemons/ganeti-noded +++ b/daemons/ganeti-noded @@ -21,12 +21,16 @@ """Ganeti node daemon""" -# functions in this module need to have a given name structure, so: -# pylint: disable-msg=C0103 +# pylint: disable-msg=C0103,W0142 + +# C0103: Functions in this module need to have a given name structure, +# and the name of the daemon doesn't match + +# W0142: Used * or ** magic, since we do use it extensively in this +# module import os import sys -import SocketServer import logging import signal @@ -42,7 +46,7 @@ from ganeti import http from ganeti import utils from ganeti import storage -import ganeti.http.server +import ganeti.http.server # pylint: disable-msg=W0611 queue_lock = None @@ -72,6 +76,9 @@ class NodeHttpServer(http.server.HttpServer): This class holds all methods exposed over the RPC interface. """ + # too many public methods, and unused args - all methods get params + # due to the API + # pylint: disable-msg=R0904,W0613 def __init__(self, *args, **kwargs): http.server.HttpServer.__init__(self, *args, **kwargs) self.noded_pid = os.getpid() @@ -782,11 +789,21 @@ class NodeHttpServer(http.server.HttpServer): return backend.ValidateHVParams(hvname, hvparams) -def ExecNoded(options, args): +def CheckNoded(_, args): + """Initial checks whether to run or exit with a failure. + + """ + if args: # noded doesn't take any arguments + print >> sys.stderr, ("Usage: %s [-f] [-d] [-p port] [-b ADDRESS]" % + sys.argv[0]) + sys.exit(constants.EXIT_FAILURE) + + +def ExecNoded(options, _): """Main node daemon function, executed with the PID file held. """ - global queue_lock + global queue_lock # pylint: disable-msg=W0603 # Read SSL certificate if options.ssl: @@ -819,7 +836,7 @@ def main(): dirs = [(val, constants.RUN_DIRS_MODE) for val in constants.SUB_RUN_DIRS] dirs.append((constants.LOG_OS_DIR, 0750)) dirs.append((constants.LOCK_DIR, 1777)) - daemon.GenericMain(constants.NODED, parser, dirs, None, ExecNoded) + daemon.GenericMain(constants.NODED, parser, dirs, CheckNoded, ExecNoded) if __name__ == '__main__': diff --git a/daemons/ganeti-rapi b/daemons/ganeti-rapi index 5928537..8ef9365 100755 --- a/daemons/ganeti-rapi +++ b/daemons/ganeti-rapi @@ -18,28 +18,29 @@ # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301, USA. -""" Ganeti Remote API master script. +"""Ganeti Remote API master script. + """ -import glob +# pylint: disable-msg=C0103,W0142 + +# C0103: Invalid name ganeti-watcher + import logging import optparse import sys import os import os.path -import signal from ganeti import constants -from ganeti import errors from ganeti import http from ganeti import daemon from ganeti import ssconf -from ganeti import utils from ganeti import luxi from ganeti import serializer from ganeti.rapi import connector -import ganeti.http.auth +import ganeti.http.auth # pylint: disable-msg=W0611 import ganeti.http.server @@ -105,7 +106,7 @@ class RemoteApiHttpServer(http.auth.HttpServerRequestAuthentication, method = req.request_method.upper() try: ctx.handler_fn = getattr(ctx.handler, method) - except AttributeError, err: + except AttributeError: raise http.HttpBadRequest("Method %s is unsupported for path %s" % (method, req.request_path)) @@ -182,15 +183,15 @@ def CheckRapi(options, args): """Initial checks whether to run or exit with a failure. 
""" - if len(args) != 0: - print >> sys.stderr, "Usage: %s [-f] [-d] [-p port] [-b ADDRESS]" % \ - sys.argv[0] + if args: # rapi doesn't take any arguments + print >> sys.stderr, ("Usage: %s [-f] [-d] [-p port] [-b ADDRESS]" % + sys.argv[0]) sys.exit(constants.EXIT_FAILURE) ssconf.CheckMaster(options.debug) -def ExecRapi(options, args): +def ExecRapi(options, _): """Main remote API function, executed with the PID file held. """ diff --git a/daemons/ganeti-watcher b/daemons/ganeti-watcher index 20350a0..6346f07 100755 --- a/daemons/ganeti-watcher +++ b/daemons/ganeti-watcher @@ -27,11 +27,14 @@ by a node reboot. Run from cron or similar. """ +# pylint: disable-msg=C0103,W0142 + +# C0103: Invalid name ganeti-watcher + import os import sys import time import logging -import errno from optparse import OptionParser from ganeti import utils @@ -115,7 +118,7 @@ class WatcherState(object): self._data = {} else: self._data = serializer.Load(state_data) - except Exception, msg: + except Exception, msg: # pylint: disable-msg=W0703 # Ignore errors while loading the file and treat it as empty self._data = {} logging.warning(("Invalid state file. Using defaults." @@ -331,7 +334,7 @@ class Watcher(object): """ arch_count, left_count = client.AutoArchiveJobs(age) - logging.debug("Archived %s jobs, left %s" % (arch_count, left_count)) + logging.debug("Archived %s jobs, left %s", arch_count, left_count) def CheckDisks(self, notepad): """Check all nodes for restarted ones. @@ -369,7 +372,7 @@ class Watcher(object): try: logging.info("Activating disks for instance %s", instance.name) instance.ActivateDisks() - except Exception: + except Exception: # pylint: disable-msg=W0703 logging.exception("Error while activating disks for instance %s", instance.name) @@ -400,7 +403,7 @@ class Watcher(object): instance.name, last) instance.Restart() self.started_instances.add(instance.name) - except Exception: + except Exception: # pylint: disable-msg=W0703 logging.exception("Error while restarting instance %s", instance.name) @@ -464,10 +467,14 @@ def main(): """Main function. """ - global client + global client # pylint: disable-msg=W0603 options, args = ParseOptions() + if args: # watcher doesn't take any arguments + print >> sys.stderr, ("Usage: %s [-f] " % sys.argv[0]) + sys.exit(constants.EXIT_FAILURE) + utils.SetupLogging(constants.LOG_WATCHER, debug=options.debug, stderr_logging=options.debug) diff --git a/devel/release b/devel/release new file mode 100755 index 0000000..ba1c128 --- /dev/null +++ b/devel/release @@ -0,0 +1,49 @@ +#!/bin/bash + +# Copyright (C) 2009 Google Inc. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA +# 02110-1301, USA. + +# This is a test script to ease development and testing on test clusters. +# It should not be used to update production environments. + +# Usage: release v2.0.5 +# Alternative: URL=file:///my/git/repo release e5823b7e2cd8a3... 
+# It will clone the given repository from the default or passed URL,
+# checkout the given reference (a tag or branch) and then create a
+# release archive; you will need to copy the archive and delete the
+# temporary directory at the end
+
+set -e
+
+: ${URL:=git://git.ganeti.org/ganeti.git}
+TAG="$1"
+
+TMPDIR=`mktemp -d`
+cd $TMPDIR
+echo "Cloning the repository under $TMPDIR ..."
+git clone -q "$URL" dist
+cd dist
+git checkout $TAG
+./autogen.sh
+./configure
+VERSION=$(sed -n -e '/^PACKAGE_VERSION =/ s/^PACKAGE_VERSION = // p' Makefile)
+make distcheck
+fakeroot make dist
+tar tzvf ganeti-$VERSION.tar.gz
+sha1sum ganeti-$VERSION.tar.gz
+echo "The archive is at $PWD/ganeti-$VERSION.tar.gz"
+echo "Please copy it and remove the temporary directory when done."
diff --git a/devel/upload.in b/devel/upload.in
index 6c19862..8f31c3b 100644
--- a/devel/upload.in
+++ b/devel/upload.in
@@ -100,7 +100,9 @@ echo ---
 # and now put it under $prefix on the target node(s)
 for host; do
   echo Uploading code to ${host}...
-  rsync -v -rlDc --exclude="*.py[oc]" --exclude="*.pdf" --exclude="*.html" \
+  rsync -v -rlDc \
+    -e "ssh -oBatchMode=yes" \
+    --exclude="*.py[oc]" --exclude="*.pdf" --exclude="*.html" \
     "$TXD/" \
     root@${host}:/ &
 done
@@ -109,7 +111,7 @@ wait
 if test -z "${NO_RESTART}"; then
   for host; do
     echo Restarting ganeti-noded on ${host}...
-    ssh root@${host} /etc/init.d/ganeti restart &
+    ssh -oBatchMode=yes root@${host} /etc/init.d/ganeti restart &
   done
   wait
 fi
diff --git a/lib/__init__.py b/lib/__init__.py
index bab6818..b7fb973 100644
--- a/lib/__init__.py
+++ b/lib/__init__.py
@@ -20,3 +20,5 @@


 # empty file for package definition
+
+"""Ganeti python modules"""
diff --git a/lib/asyncnotifier.py b/lib/asyncnotifier.py
index 1498ad0..63c020f 100644
--- a/lib/asyncnotifier.py
+++ b/lib/asyncnotifier.py
@@ -22,15 +22,20 @@

 """Asynchronous pyinotify implementation"""

-import pyinotify
 import asyncore

+try:
+  # pylint: disable-msg=E0611
+  from pyinotify import pyinotify
+except ImportError:
+  import pyinotify
+

 class AsyncNotifier(asyncore.file_dispatcher):
   """An asyncore dispatcher for inotify events.

   """
-
+  # pylint: disable-msg=W0622,W0212
   def __init__(self, watch_manager, default_proc_fun=None, map=None):
     """Initializes this class.
diff --git a/lib/backend.py b/lib/backend.py
index a93397e..2569532 100644
--- a/lib/backend.py
+++ b/lib/backend.py
@@ -392,7 +392,7 @@ def LeaveCluster(modify_ssh_setup):
     utils.RemoveFile(constants.HMAC_CLUSTER_KEY)
     utils.RemoveFile(constants.RAPI_CERT_FILE)
     utils.RemoveFile(constants.SSL_CERT_FILE)
-  except:
+  except: # pylint: disable-msg=W0702
     logging.exception("Error while removing cluster secrets")

   result = utils.RunCmd([constants.DAEMON_UTIL, "stop", constants.CONFD])
@@ -551,6 +551,10 @@ def VerifyNode(what, cluster_name):
       tmpr.append("The procfs filesystem doesn't seem to be mounted"
                   " under /proc, missing required directory /proc/sys and"
                   " the file /proc/sysrq-trigger")
+
+  if constants.NV_TIME in what:
+    result[constants.NV_TIME] = utils.SplitTime(time.time())
+
   return result


@@ -1191,6 +1195,8 @@ def BlockdevCreate(disk, size, owner, on_primary, info):
       it's not required to return anything.
""" + # TODO: remove the obsolete 'size' argument + # pylint: disable-msg=W0613 clist = [] if disk.children: for child in disk.children: @@ -1202,6 +1208,7 @@ def BlockdevCreate(disk, size, owner, on_primary, info): # we need the children open in case the device itself has to # be assembled try: + # pylint: disable-msg=E1103 crdev.Open() except errors.BlockDeviceError, err: _Fail("Can't make child '%s' read-write: %s", child, err) @@ -1336,6 +1343,7 @@ def BlockdevAssemble(disk, owner, as_primary): try: result = _RecursiveAssembleBD(disk, owner, as_primary) if isinstance(result, bdev.BlockDev): + # pylint: disable-msg=E1103 result = result.dev_path except errors.BlockDeviceError, err: _Fail("Error while assembling disk: %s", err, exc=True) @@ -1510,7 +1518,7 @@ def BlockdevGetsize(disks): for cf in disks: try: rbd = _RecursiveFindBD(cf) - except errors.BlockDeviceError, err: + except errors.BlockDeviceError: result.append(None) continue if rbd is None: @@ -1631,16 +1639,14 @@ def _ErrnoOrStr(err): return detail -def _OSOndiskAPIVersion(name, os_dir): +def _OSOndiskAPIVersion(os_dir): """Compute and return the API version of a given OS. - This function will try to read the API version of the OS given by - the 'name' parameter and residing in the 'os_dir' directory. + This function will try to read the API version of the OS residing in + the 'os_dir' directory. - @type name: str - @param name: the OS name we should look for @type os_dir: str - @param os_dir: the directory inwhich we should look for the OS + @param os_dir: the directory in which we should look for the OS @rtype: tuple @return: tuple (status, data) with status denoting the validity and data holding either the vaid versions or an error message @@ -1737,7 +1743,7 @@ def _TryOSFromDisk(name, base_dir=None): if os_dir is None: return False, "Directory for OS %s not found in search path" % name - status, api_versions = _OSOndiskAPIVersion(name, os_dir) + status, api_versions = _OSOndiskAPIVersion(os_dir) if not status: # push the error up return status, api_versions @@ -2623,7 +2629,9 @@ class HooksRunner(object): """ if hooks_base_dir is None: hooks_base_dir = constants.HOOKS_BASE_DIR - self._BASE_DIR = hooks_base_dir + # yeah, _BASE_DIR is not valid for attributes, we use it like a + # constant + self._BASE_DIR = hooks_base_dir # pylint: disable-msg=C0103 @staticmethod def ExecHook(script, env): @@ -2741,7 +2749,8 @@ class IAllocatorRunner(object): the master side. """ - def Run(self, name, idata): + @staticmethod + def Run(name, idata): """Run an iallocator script. @type name: str diff --git a/lib/bdev.py b/lib/bdev.py index 7991345..722f12c 100644 --- a/lib/bdev.py +++ b/lib/bdev.py @@ -34,6 +34,10 @@ from ganeti import constants from ganeti import objects +# Size of reads in _CanReadDevice +_DEVICE_READ_SIZE = 128 * 1024 + + def _IgnoreError(fn, *args, **kwargs): """Executes the given function, ignoring BlockDeviceErrors. @@ -66,6 +70,20 @@ def _ThrowError(msg, *args): raise errors.BlockDeviceError(msg) +def _CanReadDevice(path): + """Check if we can read from the given device. + + This tries to read the first 128k of the device. + + """ + try: + utils.ReadFile(path, size=_DEVICE_READ_SIZE) + return True + except EnvironmentError: + logging.warning("Can't read from device %s", path, exc_info=True) + return False + + class BlockDev(object): """Block device abstract class. 
@@ -605,7 +623,7 @@ class LogicalVolume(BlockDev):
       _ThrowError("Can't compute PV info for vg %s", self._vg_name)
     pvs_info.sort()
     pvs_info.reverse()

-    free_size, pv_name, _ = pvs_info[0]
+    free_size, _, _ = pvs_info[0]
     if free_size < size:
       _ThrowError("Not enough free space: required %s,"
                   " available %s", size, free_size)
@@ -765,7 +783,7 @@ class DRBD8Status(object):
       self.est_time = None


-class BaseDRBD(BlockDev):
+class BaseDRBD(BlockDev): # pylint: disable-msg=W0223
   """Base DRBD class.

   This class contains a few bits of common functionality between the
@@ -960,6 +978,17 @@ class DRBD8(BaseDRBD):
   def __init__(self, unique_id, children, size):
     if children and children.count(None) > 0:
       children = []
+    if len(children) not in (0, 2):
+      raise ValueError("Invalid configuration data %s" % str(children))
+    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 6:
+      raise ValueError("Invalid configuration data %s" % str(unique_id))
+    (self._lhost, self._lport,
+     self._rhost, self._rport,
+     self._aminor, self._secret) = unique_id
+    if children:
+      if not _CanReadDevice(children[1].dev_path):
+        logging.info("drbd%s: Ignoring unreadable meta device", self._aminor)
+        children = []
     super(DRBD8, self).__init__(unique_id, children, size)
     self.major = self._DRBD_MAJOR
     version = self._GetVersion()
@@ -968,13 +997,6 @@ class DRBD8(BaseDRBD):
                   " usage: kernel is %s.%s, ganeti wants 8.x",
                   version['k_major'], version['k_minor'])

-    if len(children) not in (0, 2):
-      raise ValueError("Invalid configuration data %s" % str(children))
-    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 6:
-      raise ValueError("Invalid configuration data %s" % str(unique_id))
-    (self._lhost, self._lport,
-     self._rhost, self._rport,
-     self._aminor, self._secret) = unique_id
     if (self._lhost is not None and self._lhost == self._rhost and
         self._lport == self._rport):
       raise ValueError("Invalid configuration data, same local/remote %s" %
@@ -1551,6 +1573,8 @@ class DRBD8(BaseDRBD):
     the attach if can return success.

     """
+    # TODO: Rewrite to not use a for loop just because there is 'break'
+    # pylint: disable-msg=W0631
     net_data = (self._lhost, self._lport, self._rhost, self._rport)
     for minor in (self._aminor,):
       info = self._GetDevInfo(self._GetShowData(minor))
@@ -1798,6 +1822,22 @@ class FileStorage(BlockDev):
       if err.errno != errno.ENOENT:
         _ThrowError("Can't remove file '%s': %s", self.dev_path, err)

+  def Rename(self, new_id):
+    """Renames the file.
+
+    """
+    # TODO: implement rename for file-based storage
+    _ThrowError("Rename is not supported for file-based storage")
+
+  def Grow(self, amount):
+    """Grow the file
+
+    @param amount: the amount (in mebibytes) to grow with
+
+    """
+    # TODO: implement grow for file-based storage
+    _ThrowError("Grow not supported for file-based storage")
+
   def Attach(self):
     """Attach to an existing file.
diff --git a/lib/build/__init__.py b/lib/build/__init__.py
index 0f58b61..be7f32e 100644
--- a/lib/build/__init__.py
+++ b/lib/build/__init__.py
@@ -18,6 +18,7 @@
 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 # 02110-1301, USA.
+"""Module used during the Ganeti build process""" import imp import os diff --git a/lib/cli.py b/lib/cli.py index 752546b..bbdb629 100644 --- a/lib/cli.py +++ b/lib/cli.py @@ -82,6 +82,7 @@ __all__ = [ "NODE_PLACEMENT_OPT", "NOHDR_OPT", "NOIPCHECK_OPT", + "NONAMECHECK_OPT", "NOLVM_STORAGE_OPT", "NOMODIFY_ETCHOSTS_OPT", "NOMODIFY_SSH_SETUP_OPT", @@ -169,7 +170,7 @@ UN_PREFIX = "-" class _Argument: - def __init__(self, min=0, max=None): + def __init__(self, min=0, max=None): # pylint: disable-msg=W0622 self.min = min self.max = max @@ -184,6 +185,7 @@ class ArgSuggest(_Argument): Value can be any of the ones passed to the constructor. """ + # pylint: disable-msg=W0622 def __init__(self, min=0, max=None, choices=None): _Argument.__init__(self, min=min, max=max) self.choices = choices @@ -250,7 +252,6 @@ ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)] ARGS_ONE_NODE = [ArgNode(min=1, max=1)] - def _ExtractTagsObject(opts, args): """Extract the tag type object. @@ -311,8 +312,8 @@ def ListTags(opts, args): """ kind, name = _ExtractTagsObject(opts, args) - op = opcodes.OpGetTags(kind=kind, name=name) - result = SubmitOpCode(op) + cl = GetClient() + result = cl.QueryTags(kind, name) result = list(result) result.sort() for tag in result: @@ -353,7 +354,7 @@ def RemoveTags(opts, args): SubmitOpCode(op) -def check_unit(option, opt, value): +def check_unit(option, opt, value): # pylint: disable-msg=W0613 """OptParsers custom converter for units. """ @@ -400,7 +401,7 @@ def _SplitKeyVal(opt, data): return kv_dict -def check_ident_key_val(option, opt, value): +def check_ident_key_val(option, opt, value): # pylint: disable-msg=W0613 """Custom parser for ident:key=val,key=val options. This will store the parsed values as a tuple (ident, {key: val}). As such, @@ -428,7 +429,7 @@ def check_ident_key_val(option, opt, value): return retval -def check_key_val(option, opt, value): +def check_key_val(option, opt, value): # pylint: disable-msg=W0613 """Custom parser class for key=val,key=val options. This will store the parsed values as a dict {key: val}. 
@@ -597,6 +598,11 @@ NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
                            help="Don't check that the instance's IP"
                            " is alive")

+NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
+                             default=True, action="store_false",
+                             help="Don't check that the instance's name"
+                             " is resolvable")
+
 NET_OPT = cli_option("--net",
                      help="NIC parameters", default=[],
                      dest="nics", action="append", type="identkeyval")
@@ -1467,6 +1473,7 @@ def GenericInstanceCreate(mode, opts, args):
                                 nics=nics,
                                 pnode=pnode, snode=snode,
                                 ip_check=opts.ip_check,
+                                name_check=opts.name_check,
                                 wait_for_sync=opts.wait_for_sync,
                                 file_storage_dir=opts.file_storage_dir,
                                 file_driver=opts.file_driver,
@@ -1524,8 +1531,8 @@ def GenerateTable(headers, fields, separator, data,
   if unitfields is None:
     unitfields = []

-  numfields = utils.FieldSet(*numfields)
-  unitfields = utils.FieldSet(*unitfields)
+  numfields = utils.FieldSet(*numfields) # pylint: disable-msg=W0142
+  unitfields = utils.FieldSet(*unitfields) # pylint: disable-msg=W0142

   format_fields = []
   for field in fields:
diff --git a/lib/cmdlib.py b/lib/cmdlib.py
index c837cd9..4f8a8f2 100644
--- a/lib/cmdlib.py
+++ b/lib/cmdlib.py
@@ -21,7 +21,10 @@

 """Module implementing the master-side code."""

-# pylint: disable-msg=W0613,W0201
+# pylint: disable-msg=W0201
+
+# W0201 since most LU attributes are defined in CheckPrereq or similar
+# functions

 import os
 import os.path
@@ -87,9 +90,9 @@ class LogicalUnit(object):
     self.recalculate_locks = {}
     self.__ssh = None
     # logging
-    self.LogWarning = processor.LogWarning
-    self.LogInfo = processor.LogInfo
-    self.LogStep = processor.LogStep
+    self.LogWarning = processor.LogWarning # pylint: disable-msg=C0103
+    self.LogInfo = processor.LogInfo # pylint: disable-msg=C0103
+    self.LogStep = processor.LogStep # pylint: disable-msg=C0103
     # support for dry-run
     self.dry_run_result = None

@@ -277,6 +280,9 @@ class LogicalUnit(object):
         and hook results

     """
+    # API must be kept, thus we ignore the unused argument and could
+    # be a function warnings
+    # pylint: disable-msg=W0613,R0201
     return lu_result

   def _ExpandAndLockInstance(self):
@@ -347,7 +353,7 @@ class LogicalUnit(object):
       del self.recalculate_locks[locking.LEVEL_NODE]


-class NoHooksLU(LogicalUnit):
+class NoHooksLU(LogicalUnit): # pylint: disable-msg=W0223
   """Simple LU which runs no hooks.

   This LU is intended as a parent for other LogicalUnits which will
@@ -357,6 +363,14 @@ class NoHooksLU(LogicalUnit):
   HPATH = None
   HTYPE = None

+  def BuildHooksEnv(self):
+    """Empty BuildHooksEnv for NoHooksLu.
+
+    This just raises an error.
+
+    """
+    assert False, "BuildHooksEnv called for NoHooksLUs"
+

 class Tasklet:
   """Tasklet base class.
@@ -688,7 +702,7 @@ def _BuildInstanceHookEnvByObject(lu, instance, override=None):
   }
   if override:
     args.update(override)
-  return _BuildInstanceHookEnv(**args)
+  return _BuildInstanceHookEnv(**args) # pylint: disable-msg=W0142


 def _AdjustCandidatePool(lu, exceptions):
@@ -898,6 +912,7 @@ class LUDestroyCluster(LogicalUnit):
     try:
       hm.RunPhase(constants.HOOKS_PHASE_POST, [master])
     except:
+      # pylint: disable-msg=W0702
       self.LogWarning("Errors occurred running hooks on %s" % master)

     result = self.rpc.call_node_stop_master(master, False)
@@ -944,6 +959,7 @@ class LUVerifyCluster(LogicalUnit):
   ENODESSH = (TNODE, "ENODESSH")
   ENODEVERSION = (TNODE, "ENODEVERSION")
   ENODESETUP = (TNODE, "ENODESETUP")
+  ENODETIME = (TNODE, "ENODETIME")

   ETYPE_FIELD = "code"
   ETYPE_ERROR = "ERROR"
@@ -1017,7 +1033,7 @@ class LUVerifyCluster(LogicalUnit):

     """
     node = nodeinfo.name
-    _ErrorIf = self._ErrorIf
+    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

     # main result, node_result should be a non-empty dict
     test = not node_result or not isinstance(node_result, dict)
@@ -1151,7 +1167,7 @@ class LUVerifyCluster(LogicalUnit):
     # check that ':' is not present in PV names, since it's a
     # special character for lvcreate (denotes the range of PEs to
     # use on the PV)
-    for size, pvname, owner_vg in pvlist:
+    for _, pvname, owner_vg in pvlist:
       test = ":" in pvname
       _ErrorIf(test, self.ENODELVM, node, "Invalid character ':' in PV"
                " '%s' of VG '%s'", pvname, owner_vg)
@@ -1164,7 +1180,7 @@ class LUVerifyCluster(LogicalUnit):
     available on the instance's node.

     """
-    _ErrorIf = self._ErrorIf
+    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103

     node_current = instanceconfig.primary_node

     node_vol_should = {}
@@ -1279,7 +1295,7 @@ class LUVerifyCluster(LogicalUnit):

     """
     self.bad = False
-    _ErrorIf = self._ErrorIf
+    _ErrorIf = self._ErrorIf # pylint: disable-msg=C0103
     verbose = self.op.verbose
     self._feedback_fn = feedback_fn
     feedback_fn("* Verifying global settings")
@@ -1326,14 +1342,23 @@ class LUVerifyCluster(LogicalUnit):
       constants.NV_VERSION: None,
       constants.NV_HVINFO: self.cfg.GetHypervisorType(),
       constants.NV_NODESETUP: None,
+      constants.NV_TIME: None,
       }
+
     if vg_name is not None:
       node_verify_param[constants.NV_VGLIST] = None
       node_verify_param[constants.NV_LVLIST] = vg_name
       node_verify_param[constants.NV_PVLIST] = [vg_name]
       node_verify_param[constants.NV_DRBDLIST] = None
+
+    # Due to the way our RPC system works, exact response times cannot be
+    # guaranteed (e.g. a broken node could run into a timeout). By keeping the
+    # time before and after executing the request, we can at least have a time
+    # window.
+    nvinfo_starttime = time.time()
     all_nvinfo = self.rpc.call_node_verify(nodelist, node_verify_param,
                                            self.cfg.GetClusterName())
+    nvinfo_endtime = time.time()

     cluster = self.cfg.GetClusterInfo()
     master_node = self.cfg.GetMasterNode()
@@ -1380,6 +1405,7 @@ class LUVerifyCluster(LogicalUnit):
           else:
             instance = instanceinfo[instance]
             node_drbd[minor] = (instance.name, instance.admin_up)
+
       self._VerifyNode(node_i, file_names, local_checksums,
                        nresult, master_files, node_drbd, vg_name)

@@ -1413,6 +1439,27 @@ class LUVerifyCluster(LogicalUnit):
       if test:
         continue

+      # Node time
+      ntime = nresult.get(constants.NV_TIME, None)
+      try:
+        ntime_merged = utils.MergeTime(ntime)
+      except (ValueError, TypeError):
+        _ErrorIf(test, self.ENODETIME, node, "Node returned invalid time")
+
+      if ntime_merged < (nvinfo_starttime - constants.NODE_MAX_CLOCK_SKEW):
+        ntime_diff = abs(nvinfo_starttime - ntime_merged)
+      elif ntime_merged > (nvinfo_endtime + constants.NODE_MAX_CLOCK_SKEW):
+        ntime_diff = abs(ntime_merged - nvinfo_endtime)
+      else:
+        ntime_diff = None
+
+      _ErrorIf(ntime_diff is not None, self.ENODETIME, node,
+               "Node time diverges by at least %0.1fs from master node time",
+               ntime_diff)
+
+      if ntime_diff is not None:
+        continue
+
       try:
         node_info[node] = {
           "mfree": int(nodeinfo['memory_free']),
@@ -1551,7 +1598,6 @@ class LUVerifyCluster(LogicalUnit):
       assert hooks_results, "invalid result from hooks"

       for node_name in hooks_results:
-        show_node_header = True
         res = hooks_results[node_name]
         msg = res.fail_msg
         test = msg and not res.offline
@@ -1641,7 +1687,7 @@ class LUVerifyDisks(NoHooksLU):
         continue

       lvs = node_res.payload
-      for lv_name, (_, lv_inactive, lv_online) in lvs.items():
+      for lv_name, (_, _, lv_online) in lvs.items():
         inst = nv_dict.pop((node, lv_name), None)
         if (not lv_online and inst is not None
             and inst.name not in res_instances):
@@ -1797,7 +1843,8 @@ class LURenameCluster(LogicalUnit):
       "NEW_NAME": self.op.name,
       }
     mn = self.cfg.GetMasterNode()
-    return env, [mn], [mn]
+    all_nodes = self.cfg.GetNodeList()
+    return env, [mn], all_nodes

   def CheckPrereq(self):
     """Verify that the passed name is a valid one.
@@ -2275,10 +2322,9 @@ class LUDiagnoseOS(NoHooksLU):

   """
   @staticmethod
-  def _DiagnoseByOS(node_list, rlist):
+  def _DiagnoseByOS(rlist):
     """Remaps a per-node return list into an a per-os per-node dictionary

-    @param node_list: a list with the names of all nodes
     @param rlist: a map with node names as keys and OS objects as values

     @rtype: dict
@@ -2316,7 +2362,7 @@ class LUDiagnoseOS(NoHooksLU):
     """
     valid_nodes = [node for node in self.cfg.GetOnlineNodeList()]
     node_data = self.rpc.call_os_diagnose(valid_nodes)
-    pol = self._DiagnoseByOS(valid_nodes, node_data)
+    pol = self._DiagnoseByOS(node_data)
     output = []
     calc_valid = self._FIELDS_NEEDVALID.intersection(self.op.output_fields)
     calc_variants = "variants" in self.op.output_fields
@@ -2432,8 +2478,9 @@ class LURemoveNode(LogicalUnit):
     # Run post hooks on the node before it's removed
     hm = self.proc.hmclass(self.rpc.call_hooks_runner, self)
     try:
-      h_results = hm.RunPhase(constants.HOOKS_PHASE_POST, [node.name])
+      hm.RunPhase(constants.HOOKS_PHASE_POST, [node.name])
     except:
+      # pylint: disable-msg=W0702
       self.LogWarning("Errors occurred running hooks on %s" % node.name)

     result = self.rpc.call_node_leave_cluster(node.name, modify_ssh_setup)
@@ -2447,6 +2494,7 @@ class LUQueryNodes(NoHooksLU):
   """Logical unit for querying nodes.
""" + # pylint: disable-msg=W0142 _OP_REQP = ["output_fields", "names", "use_locking"] REQ_BGL = False @@ -2549,7 +2597,7 @@ class LUQueryNodes(NoHooksLU): if inst_fields & frozenset(self.op.output_fields): inst_data = self.cfg.GetAllInstancesInfo() - for instance_name, inst in inst_data.items(): + for inst in inst_data.values(): if inst.primary_node in node_to_primary: node_to_primary[inst.primary_node].add(inst.name) for secnode in inst.secondary_nodes: @@ -2977,7 +3025,7 @@ class LUAddNode(LogicalUnit): # later in the procedure; this also means that if the re-add # fails, we are left with a non-offlined, broken node if self.op.readd: - new_node.drained = new_node.offline = False + new_node.drained = new_node.offline = False # pylint: disable-msg=W0201 self.LogInfo("Readding a node, the offline/drained flags were reset") # if we demote the node, we do cleanup later in the procedure new_node.master_candidate = self.master_candidate @@ -4024,7 +4072,7 @@ class LURecreateInstanceDisks(LogicalUnit): """ to_skip = [] - for idx, disk in enumerate(self.instance.disks): + for idx, _ in enumerate(self.instance.disks): if idx not in self.op.disks: # disk idx has not been passed in to_skip.append(idx) continue @@ -4219,6 +4267,7 @@ class LUQueryInstances(NoHooksLU): """Logical unit for querying instances. """ + # pylint: disable-msg=W0142 _OP_REQP = ["output_fields", "names", "use_locking"] REQ_BGL = False _SIMPLE_FIELDS = ["name", "os", "network_port", "hypervisor", @@ -4280,6 +4329,8 @@ class LUQueryInstances(NoHooksLU): """Computes the list of nodes and their attributes. """ + # pylint: disable-msg=R0912 + # way too many branches here all_info = self.cfg.GetAllInstancesInfo() if self.wanted == locking.ALL_SET: # caller didn't specify instance names, so ordering is not important @@ -4752,7 +4803,7 @@ class LUMoveInstance(LogicalUnit): for idx, dsk in enumerate(instance.disks): if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE): raise errors.OpPrereqError("Instance disk %d has a complex layout," - " cannot copy", errors.ECODE_STATE) + " cannot copy" % idx, errors.ECODE_STATE) _CheckNodeOnline(self, target_node) _CheckNodeNotDrained(self, target_node) @@ -5586,6 +5637,19 @@ class LUCreateInstance(LogicalUnit): "hvparams", "beparams"] REQ_BGL = False + def CheckArguments(self): + """Check arguments. + + """ + # do not require name_check to ease forward/backward compatibility + # for tools + if not hasattr(self.op, "name_check"): + self.op.name_check = True + if self.op.ip_check and not self.op.name_check: + # TODO: make the ip check more flexible and not depend on the name check + raise errors.OpPrereqError("Cannot do ip checks without a name check", + errors.ECODE_INVAL) + def _ExpandNode(self, node): """Expands and checks one node name. 
@@ -5650,8 +5714,14 @@ class LUCreateInstance(LogicalUnit):
     #### instance parameters check

     # instance name verification
-    hostname1 = utils.GetHostInfo(self.op.instance_name)
-    self.op.instance_name = instance_name = hostname1.name
+    if self.op.name_check:
+      hostname1 = utils.GetHostInfo(self.op.instance_name)
+      self.op.instance_name = instance_name = hostname1.name
+      # used in CheckPrereq for ip ping check
+      self.check_ip = hostname1.ip
+    else:
+      instance_name = self.op.instance_name
+      self.check_ip = None

     # this is just a preventive check, but someone might still add this
     # instance in the meantime, and creation will fail at lock-add time
@@ -5680,6 +5750,10 @@ class LUCreateInstance(LogicalUnit):
       if ip is None or ip.lower() == constants.VALUE_NONE:
         nic_ip = None
       elif ip.lower() == constants.VALUE_AUTO:
+        if not self.op.name_check:
+          raise errors.OpPrereqError("IP address set to auto but name checks"
+                                     " have been skipped. Aborting.",
+                                     errors.ECODE_INVAL)
         nic_ip = hostname1.ip
       else:
         if not utils.IsValidIP(ip):
@@ -5696,16 +5770,14 @@ class LUCreateInstance(LogicalUnit):
       # MAC address verification
       mac = nic.get("mac", constants.VALUE_AUTO)
       if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
-        if not utils.IsValidMac(mac.lower()):
-          raise errors.OpPrereqError("Invalid MAC address specified: %s" %
-                                     mac, errors.ECODE_INVAL)
-        else:
-          try:
-            self.cfg.ReserveMAC(mac, self.proc.GetECId())
-          except errors.ReservationError:
-            raise errors.OpPrereqError("MAC address %s already in use"
-                                       " in cluster" % mac,
-                                       errors.ECODE_NOTUNIQUE)
+        mac = utils.NormalizeAndValidateMac(mac)
+
+        try:
+          self.cfg.ReserveMAC(mac, self.proc.GetECId())
+        except errors.ReservationError:
+          raise errors.OpPrereqError("MAC address %s already in use"
+                                     " in cluster" % mac,
+                                     errors.ECODE_NOTUNIQUE)

       # bridge verification
       bridge = nic.get("bridge", None)
@@ -5747,9 +5819,6 @@ class LUCreateInstance(LogicalUnit):
                                    errors.ECODE_INVAL)
       self.disks.append({"size": size, "mode": mode})

-    # used in CheckPrereq for ip ping check
-    self.check_ip = hostname1.ip
-
     # file storage checks
     if (self.op.file_driver and
         not self.op.file_driver in constants.FILE_DRIVER):
@@ -5960,12 +6029,8 @@ class LUCreateInstance(LogicalUnit):
           nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)

     # ENDIF: self.op.mode == constants.INSTANCE_IMPORT
-    # ip ping checks (we use the same ip that was resolved in ExpandNames)
-    if self.op.start and not self.op.ip_check:
-      raise errors.OpPrereqError("Cannot ignore IP address conflicts when"
-                                 " adding an instance in start mode",
-                                 errors.ECODE_INVAL)

+    # ip ping checks (we use the same ip that was resolved in ExpandNames)
     if self.op.ip_check:
       if utils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
         raise errors.OpPrereqError("IP %s of instance %s already in use" %
@@ -6488,7 +6553,8 @@ class TLReplaceDisks(Tasklet):
       if len(ial.nodes) != ial.required_nodes:
         raise errors.OpPrereqError("iallocator '%s' returned invalid number"
                                    " of nodes (%s), required %s" %
-                                   (len(ial.nodes), ial.required_nodes),
+                                   (iallocator_name,
+                                    len(ial.nodes), ial.required_nodes),
                                    errors.ECODE_FAULT)

     remote_node_name = ial.nodes[0]
@@ -6735,7 +6801,7 @@ class TLReplaceDisks(Tasklet):
     return iv_names

   def _CheckDevices(self, node_name, iv_names):
-    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
+    for name, (dev, _, _) in iv_names.iteritems():
       self.cfg.SetDiskID(dev, node_name)

       result = self.rpc.call_blockdev_find(node_name, dev)
@@ -6751,7 +6817,7 @@ class TLReplaceDisks(Tasklet):
         raise errors.OpExecError("DRBD device %s is degraded!" % name)
errors.OpExecError("DRBD device %s is degraded!" % name) def _RemoveOldStorage(self, node_name, iv_names): - for name, (dev, old_lvs, _) in iv_names.iteritems(): + for name, (_, old_lvs, _) in iv_names.iteritems(): self.lu.LogInfo("Remove logical volumes for %s" % name) for lv in old_lvs: @@ -6946,6 +7012,7 @@ class TLReplaceDisks(Tasklet): if self.instance.primary_node == o_node1: p_minor = o_minor1 else: + assert self.instance.primary_node == o_node2, "Three-node instance?" p_minor = o_minor2 new_alone_id = (self.instance.primary_node, self.new_node, None, @@ -7467,9 +7534,8 @@ class LUSetInstanceParams(LogicalUnit): if 'mac' in nic_dict: nic_mac = nic_dict['mac'] if nic_mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE): - if not utils.IsValidMac(nic_mac): - raise errors.OpPrereqError("Invalid MAC address %s" % nic_mac, - errors.ECODE_INVAL) + nic_mac = utils.NormalizeAndValidateMac(nic_mac) + if nic_op != constants.DDM_ADD and nic_mac == constants.VALUE_AUTO: raise errors.OpPrereqError("'auto' is not a valid MAC address when" " modifying an existing nic", @@ -7539,7 +7605,8 @@ class LUSetInstanceParams(LogicalUnit): nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes) return env, nl, nl - def _GetUpdatedParams(self, old_params, update_dict, + @staticmethod + def _GetUpdatedParams(old_params, update_dict, default_values, parameter_types): """Return the new params dict for the given params. @@ -7749,7 +7816,7 @@ class LUSetInstanceParams(LogicalUnit): raise errors.OpPrereqError("Disk operations not supported for" " diskless instances", errors.ECODE_INVAL) - for disk_op, disk_dict in self.op.disks: + for disk_op, _ in self.op.disks: if disk_op == constants.DDM_REMOVE: if len(instance.disks) == 1: raise errors.OpPrereqError("Cannot remove the last disk of" @@ -7793,7 +7860,6 @@ class LUSetInstanceParams(LogicalUnit): result = [] instance = self.instance - cluster = self.cluster # disk changes for disk_op, disk_dict in self.op.disks: if disk_op == constants.DDM_REMOVE: @@ -8184,7 +8250,7 @@ class LURemoveExport(NoHooksLU): " Domain Name.") -class TagsLU(NoHooksLU): +class TagsLU(NoHooksLU): # pylint: disable-msg=W0223 """Generic tags LU. This is an abstract class which is the parent of all the other tags LUs. @@ -8395,6 +8461,8 @@ class IAllocator(object): easy usage """ + # pylint: disable-msg=R0902 + # lots of instance attributes _ALLO_KEYS = [ "mem_size", "disks", "disk_template", "os", "tags", "nics", "vcpus", "hypervisor", diff --git a/lib/confd/client.py b/lib/confd/client.py index 674a2ca..e328868 100644 --- a/lib/confd/client.py +++ b/lib/confd/client.py @@ -50,7 +50,6 @@ confirming what you already got. # E0203: Access to member %r before its definition, since we use # objects.py which doesn't explicitely initialise its members -import socket import time import random @@ -130,6 +129,8 @@ class ConfdClient: @param peers: list of peer nodes """ + # we are actually called from init, so: + # pylint: disable-msg=W0201 if not isinstance(peers, list): raise errors.ProgrammerError("peers must be a list") self._peers = peers diff --git a/lib/confd/querylib.py b/lib/confd/querylib.py index 17621dd..e1e323f 100644 --- a/lib/confd/querylib.py +++ b/lib/confd/querylib.py @@ -50,7 +50,7 @@ class ConfdQuery(object): """ self.reader = reader - def Exec(self, query): + def Exec(self, query): # pylint: disable-msg=R0201,W0613 """Process a single UDP request from a client. 

     Different queries should override this function, which by defaults returns
@@ -94,16 +94,38 @@ class ClusterMasterQuery(ConfdQuery):
   It accepts no arguments, and returns the current cluster master.

   """
+  def _GetMasterNode(self):
+    return self.reader.GetMasterNode()
+
   def Exec(self, query):
     """ClusterMasterQuery main execution

     """
-    if query is None:
+    if isinstance(query, dict):
+      if constants.CONFD_REQQ_FIELDS in query:
+        status = constants.CONFD_REPL_STATUS_OK
+        req_fields = query[constants.CONFD_REQQ_FIELDS]
+        if not isinstance(req_fields, (list, tuple)):
+          logging.debug("FIELDS request should be a list")
+          return QUERY_ARGUMENT_ERROR
+
+        answer = []
+        for field in req_fields:
+          if field == constants.CONFD_REQFIELD_NAME:
+            answer.append(self._GetMasterNode())
+          elif field == constants.CONFD_REQFIELD_IP:
+            answer.append(self.reader.GetMasterIP())
+          elif field == constants.CONFD_REQFIELD_MNODE_PIP:
+            answer.append(self.reader.GetNodePrimaryIp(self._GetMasterNode()))
+      else:
+        logging.debug("missing FIELDS in query dict")
+        return QUERY_ARGUMENT_ERROR
+    elif not query:
       status = constants.CONFD_REPL_STATUS_OK
       answer = self.reader.GetMasterNode()
     else:
-      status = constants.CONFD_REPL_STATUS_ERROR
-      answer = 'master query accepts no query argument'
+      logging.debug("Invalid master query argument: not dict or empty")
+      return QUERY_ARGUMENT_ERROR

     return status, answer

@@ -165,7 +187,6 @@ class InstanceIpToNodePrimaryIpQuery(ConfdQuery):
       instances_list = query[constants.CONFD_REQQ_IPLIST]
       mode = constants.CONFD_REQQ_IPLIST
     else:
-      status = constants.CONFD_REPL_STATUS_ERROR
       logging.debug("missing IP or IPLIST in query dict")
       return QUERY_ARGUMENT_ERROR

@@ -179,40 +200,41 @@ class InstanceIpToNodePrimaryIpQuery(ConfdQuery):
       network_link = None
       mode = constants.CONFD_REQQ_IP
     else:
-      logging.debug("Invalid query argument type for: %s" % query)
+      logging.debug("Invalid query argument type for: %s", query)
       return QUERY_ARGUMENT_ERROR

     pnodes_list = []

     for instance_ip in instances_list:
       if not isinstance(instance_ip, basestring):
-        logging.debug("Invalid IP type for: %s" % instance_ip)
+        logging.debug("Invalid IP type for: %s", instance_ip)
         return QUERY_ARGUMENT_ERROR

       instance = self.reader.GetInstanceByLinkIp(instance_ip, network_link)
       if not instance:
-        logging.debug("Unknown instance IP: %s" % instance_ip)
+        logging.debug("Unknown instance IP: %s", instance_ip)
         pnodes_list.append(QUERY_UNKNOWN_ENTRY_ERROR)
         continue

       pnode = self.reader.GetInstancePrimaryNode(instance)
       if not pnode:
         logging.error("Instance '%s' doesn't have an associated primary"
-                      " node" % instance)
+                      " node", instance)
         pnodes_list.append(QUERY_INTERNAL_ERROR)
         continue

       pnode_primary_ip = self.reader.GetNodePrimaryIp(pnode)
       if not pnode_primary_ip:
         logging.error("Primary node '%s' doesn't have an associated"
-                      " primary IP" % pnode)
+                      " primary IP", pnode)
         pnodes_list.append(QUERY_INTERNAL_ERROR)
         continue

       pnodes_list.append((constants.CONFD_REPL_STATUS_OK, pnode_primary_ip))

-    # If a single ip was requested, return a single answer, otherwise the whole
-    # list, with a success status (since each entry has its own success/failure)
+    # If a single ip was requested, return a single answer, otherwise
+    # the whole list, with a success status (since each entry has its
+    # own success/failure)
     if mode == constants.CONFD_REQQ_IP:
       return pnodes_list[0]

diff --git a/lib/confd/server.py b/lib/confd/server.py
index 1bbf26f..6c7f24d 100644
--- a/lib/confd/server.py
+++ b/lib/confd/server.py
@@ -100,7 +100,7 @@ class ConfdProcessor(object):
         payload_out = self.PackReply(reply, rsalt)
         return payload_out
       except errors.ConfdRequestError, err:
-        logging.info('Ignoring broken query from %s:%d: %s' % (ip, port, err))
+        logging.info('Ignoring broken query from %s:%d: %s', ip, port, err)
         return None

   def ExtractRequest(self, payload):
@@ -110,7 +110,7 @@ class ConfdProcessor(object):

     """
     current_time = time.time()
-    logging.debug("Extracting request with size: %d" % (len(payload)))
+    logging.debug("Extracting request with size: %d", len(payload))
     try:
       (message, salt) = serializer.LoadSigned(payload, self.hmac_key)
     except errors.SignatureError, err:
@@ -142,7 +142,7 @@ class ConfdProcessor(object):
     @return: tuple of reply and salt to add to the signature

     """
-    logging.debug("Processing request: %s" % request)
+    logging.debug("Processing request: %s", request)
     if request.protocol != constants.CONFD_PROTOCOL_VERSION:
       msg = "wrong protocol version %d" % request.protocol
       raise errors.ConfdRequestError(msg)
@@ -165,7 +165,7 @@ class ConfdProcessor(object):
       serial=self.reader.GetConfigSerialNo(),
       )

-    logging.debug("Sending reply: %s" % reply)
+    logging.debug("Sending reply: %s", reply)

     return (reply, rsalt)

diff --git a/lib/config.py b/lib/config.py
index a55906f..47de2b5 100644
--- a/lib/config.py
+++ b/lib/config.py
@@ -424,7 +424,7 @@ class ConfigWriter:
                   node.offline))

     # drbd minors check
-    d_map, duplicates = self._UnlockedComputeDRBDMap()
+    _, duplicates = self._UnlockedComputeDRBDMap()
     for node, minor, instance_a, instance_b in duplicates:
       result.append("DRBD minor %d on node %s is assigned twice to instances"
                     " %s and %s" % (minor, node, instance_a, instance_b))
@@ -826,9 +826,9 @@ class ConfigWriter:
     """
     if not item.uuid:
       item.uuid = self._GenerateUniqueID(ec_id)
-    elif item.uuid in self._AllIDs(temporary=True):
-      raise errors.ConfigurationError("Cannot add '%s': UUID already in use" %
-                                      (item.name, item.uuid))
+    elif item.uuid in self._AllIDs(include_temporary=True):
+      raise errors.ConfigurationError("Cannot add '%s': UUID %s already"
+                                      " in use" % (item.name, item.uuid))

   def _SetInstanceStatus(self, instance_name, status):
     """Set the instance's status to a given value.
diff --git a/lib/constants.py b/lib/constants.py
index 7fb61a2..c20e65b 100644
--- a/lib/constants.py
+++ b/lib/constants.py
@@ -325,7 +325,7 @@ DEFAULT_MAC_PREFIX = "aa:00:00"
 LVM_STRIPECOUNT = _autoconf.LVM_STRIPECOUNT

 # default maximum instance wait time, in seconds.
 DEFAULT_SHUTDOWN_TIMEOUT = 120
-
+NODE_MAX_CLOCK_SKEW = 150

 # RPC constants
 (RPC_ENCODING_NONE,
@@ -396,6 +396,7 @@ HV_DEVICE_MODEL = "device_model"
 HV_INIT_SCRIPT = "init_script"
 HV_MIGRATION_PORT = "migration_port"
 HV_USE_LOCALTIME = "use_localtime"
+HV_DISK_CACHE = "disk_cache"

 HVS_PARAMETER_TYPES = {
   HV_BOOT_ORDER: VTYPE_STRING,
@@ -422,6 +423,7 @@ HVS_PARAMETER_TYPES = {
   HV_INIT_SCRIPT: VTYPE_STRING,
   HV_MIGRATION_PORT: VTYPE_INT,
   HV_USE_LOCALTIME: VTYPE_BOOL,
+  HV_DISK_CACHE: VTYPE_STRING,
   }

 HVS_PARAMETERS = frozenset(HVS_PARAMETER_TYPES.keys())
@@ -496,6 +498,15 @@ HT_DISK_SD = "sd"
 HT_DISK_MTD = "mtd"
 HT_DISK_PFLASH = "pflash"

+HT_CACHE_DEFAULT = "default"
+HT_CACHE_NONE = "none"
+HT_CACHE_WTHROUGH = "writethrough"
+HT_CACHE_WBACK = "writeback"
+HT_VALID_CACHE_TYPES = frozenset([HT_CACHE_DEFAULT,
+                                  HT_CACHE_NONE,
+                                  HT_CACHE_WTHROUGH,
+                                  HT_CACHE_WBACK])
+
 HT_HVM_VALID_DISK_TYPES = frozenset([HT_DISK_PARAVIRTUAL, HT_DISK_IOEMU])
 HT_KVM_VALID_DISK_TYPES = frozenset([HT_DISK_PARAVIRTUAL, HT_DISK_IDE,
                                      HT_DISK_SCSI, HT_DISK_SD, HT_DISK_MTD,
@@ -531,6 +542,7 @@ NV_LVLIST = "lvlist"
 NV_PVLIST = "pvlist"
 NV_DRBDLIST = "drbd-list"
 NV_NODESETUP = "nodesetup"
+NV_TIME = "time"

 # Allocator framework constants
 IALLOCATOR_VERSION = 2
@@ -650,6 +662,7 @@ HVC_DEFAULTS = {
     HV_USB_MOUSE: '',
     HV_MIGRATION_PORT: 8102,
     HV_USE_LOCALTIME: False,
+    HV_DISK_CACHE: HT_CACHE_DEFAULT,
     },
   HT_FAKE: {
     },
@@ -691,6 +704,11 @@ CONFD_REQ_INSTANCES_IPS_LIST = 6
 CONFD_REQQ_LINK = "0"
 CONFD_REQQ_IP = "1"
 CONFD_REQQ_IPLIST = "2"
+CONFD_REQQ_FIELDS = "3"
+
+CONFD_REQFIELD_NAME = "0"
+CONFD_REQFIELD_IP = "1"
+CONFD_REQFIELD_MNODE_PIP = "2"

 CONFD_REQS = frozenset([
   CONFD_REQ_PING,
@@ -727,7 +745,7 @@ CONFD_ERROR_ARGUMENT = 3
 # Each request is "salted" by the current timestamp.
 # This constants decides how many seconds of skew to accept.
 # TODO: make this a default and allow the value to be more configurable
-CONFD_MAX_CLOCK_SKEW = 300
+CONFD_MAX_CLOCK_SKEW = 2 * NODE_MAX_CLOCK_SKEW

 # When we haven't reloaded the config for more than this amount of seconds, we
 # force a test to see if inotify is betraying us.
diff --git a/lib/daemon.py b/lib/daemon.py
index 6fb2f66..09c1393 100644
--- a/lib/daemon.py
+++ b/lib/daemon.py
@@ -105,7 +105,7 @@ class AsyncUDPSocket(asyncore.dispatcher):
           raise
       ip, port = address
       self.handle_datagram(payload, ip, port)
-    except:
+    except: # pylint: disable-msg=W0702
      # we need to catch any exception here, log it, but proceed, because even
      # if we failed handling a single request, we still want to continue.
      logging.error("Unexpected exception", exc_info=True)
@@ -139,7 +139,7 @@ class AsyncUDPSocket(asyncore.dispatcher):
       else:
         raise
       self._out_queue.pop(0)
-    except:
+    except: # pylint: disable-msg=W0702
      # we need to catch any exception here, log it, but proceed, because even
      # if we failed sending a single datagram we still want to continue.
      logging.error("Unexpected exception", exc_info=True)
diff --git a/lib/errors.py b/lib/errors.py
index 2149600..a50b037 100644
--- a/lib/errors.py
+++ b/lib/errors.py
@@ -384,7 +384,7 @@ def EncodeException(err):

 def MaybeRaise(result):
-  """Is this looks like an encoded Ganeti exception, raise it.
+  """If this looks like an encoded Ganeti exception, raise it.

   This function tries to parse the passed argument and if it looks like
   an encoding done by EncodeException, it will re-raise it.
diff --git a/lib/http/__init__.py b/lib/http/__init__.py
index a1f5e86..6f0d95c 100644
--- a/lib/http/__init__.py
+++ b/lib/http/__init__.py
@@ -299,13 +299,15 @@ class HttpVersionNotSupported(HttpException):
   code = 505


-class HttpJsonConverter:
+class HttpJsonConverter: # pylint: disable-msg=W0232
   CONTENT_TYPE = "application/json"

-  def Encode(self, data):
+  @staticmethod
+  def Encode(data):
     return serializer.DumpJson(data)

-  def Decode(self, data):
+  @staticmethod
+  def Decode(data):
     return serializer.LoadJson(data)


@@ -340,7 +342,7 @@ def WaitForSocketCondition(sock, event, timeout):
       if not io_events:
         # Timeout
         return None
-      for (evfd, evcond) in io_events:
+      for (_, evcond) in io_events:
         if evcond & check:
           return evcond
   finally:
@@ -638,6 +640,8 @@ class HttpBase(object):
     we do on our side.

     """
+    # some parameters are unused, but this is the API
+    # pylint: disable-msg=W0613
     assert self._ssl_params, "SSL not initialized"

     return (self._ssl_cert.digest("sha1") == cert.digest("sha1") and
diff --git a/lib/http/auth.py b/lib/http/auth.py
index 15fa986..a13c60f 100644
--- a/lib/http/auth.py
+++ b/lib/http/auth.py
@@ -97,6 +97,9 @@ class HttpServerRequestAuthentication(object):
     @return: Authentication realm

     """
+    # today we don't have per-request filtering, but we might want to
+    # add it in the future
+    # pylint: disable-msg=W0613
     return self.AUTH_REALM

   def PreHandleRequest(self, req):
diff --git a/lib/http/client.py b/lib/http/client.py
index 5b76cf3..490e4da 100644
--- a/lib/http/client.py
+++ b/lib/http/client.py
@@ -261,7 +261,7 @@ class HttpClientRequestExecutor(http.HttpBase):

     # Do the secret SSL handshake
     if self.using_ssl:
-      self.sock.set_connect_state()
+      self.sock.set_connect_state() # pylint: disable-msg=E1103
       try:
         http.Handshake(self.sock, self.WRITE_TIMEOUT)
       except http.HttpSessionHandshakeUnexpectedEOF:
@@ -333,7 +333,7 @@ class HttpClientWorker(workerpool.BaseWorker):
   """HTTP client worker class.
""" - def RunTask(self, pend_req): + def RunTask(self, pend_req): # pylint: disable-msg=W0221 try: HttpClientRequestExecutor(pend_req.request) finally: diff --git a/lib/http/server.py b/lib/http/server.py index d7e374c..c49cb4b 100644 --- a/lib/http/server.py +++ b/lib/http/server.py @@ -26,7 +26,6 @@ import BaseHTTPServer import cgi import logging import os -import select import socket import time import signal @@ -504,7 +503,7 @@ class HttpServer(http.HttpBase, asyncore.dispatcher): for child in self._children: try: - pid, status = os.waitpid(child, os.WNOHANG) + pid, _ = os.waitpid(child, os.WNOHANG) except os.error: pid = None if pid and pid in self._children: @@ -514,6 +513,7 @@ class HttpServer(http.HttpBase, asyncore.dispatcher): """Called for each incoming connection """ + # pylint: disable-msg=W0212 (connection, client_addr) = self.socket.accept() self._CollectChildren(False) @@ -533,7 +533,7 @@ class HttpServer(http.HttpBase, asyncore.dispatcher): self.socket = None self.request_executor(self, connection, client_addr) - except Exception: + except Exception: # pylint: disable-msg=W0703 logging.exception("Error while handling request from %s:%s", client_addr[0], client_addr[1]) os._exit(1) diff --git a/lib/hypervisor/hv_base.py b/lib/hypervisor/hv_base.py index eacc0ec..86b85c9 100644 --- a/lib/hypervisor/hv_base.py +++ b/lib/hypervisor/hv_base.py @@ -190,7 +190,7 @@ class BaseHypervisor(object): """ raise NotImplementedError - def MigrationInfo(self, instance): + def MigrationInfo(self, instance): # pylint: disable-msg=R0201,W0613 """Get instance information to perform a migration. By default assume no information is needed. @@ -310,8 +310,8 @@ class BaseHypervisor(object): """ raise NotImplementedError - - def GetLinuxNodeInfo(self): + @staticmethod + def GetLinuxNodeInfo(): """For linux systems, return actual OS information. This is an abstraction for all non-hypervisor-based classes, where diff --git a/lib/hypervisor/hv_chroot.py b/lib/hypervisor/hv_chroot.py index 6a813ac..954c922 100644 --- a/lib/hypervisor/hv_chroot.py +++ b/lib/hypervisor/hv_chroot.py @@ -27,10 +27,9 @@ import os import os.path import time import logging -from cStringIO import StringIO from ganeti import constants -from ganeti import errors +from ganeti import errors # pylint: disable-msg=W0611 from ganeti import utils from ganeti.hypervisor import hv_base from ganeti.errors import HypervisorError @@ -97,7 +96,7 @@ class ChrootManager(hv_base.BaseHypervisor): fh = open("/proc/mounts", "r") try: for line in fh: - fstype, mountpoint, rest = line.split(" ", 2) + _, mountpoint, _ = line.split(" ", 2) if (mountpoint.startswith(path) and mountpoint != path): data.append(mountpoint) @@ -255,3 +254,23 @@ class ChrootManager(hv_base.BaseHypervisor): """ if not os.path.exists(self._ROOT_DIR): return "The required directory '%s' does not exist." % self._ROOT_DIR + + @classmethod + def PowercycleNode(cls): + """Chroot powercycle, just a wrapper over Linux powercycle. + + """ + cls.LinuxPowercycle() + + def MigrateInstance(self, instance, target, live): + """Migrate an instance. 
+
+    @type instance: L{objects.Instance}
+    @param instance: the instance to be migrated
+    @type target: string
+    @param target: hostname (usually ip) of the target node
+    @type live: boolean
+    @param live: whether to do a live or non-live migration
+
+    """
+    raise HypervisorError("Migration not supported by the chroot hypervisor")
diff --git a/lib/hypervisor/hv_fake.py b/lib/hypervisor/hv_fake.py
index d53fff7..bf4a654 100644
--- a/lib/hypervisor/hv_fake.py
+++ b/lib/hypervisor/hv_fake.py
@@ -25,6 +25,7 @@
 
 import os
 import os.path
+import logging
 
 from ganeti import utils
 from ganeti import constants
@@ -68,7 +69,7 @@ class FakeHypervisor(hv_base.BaseHypervisor):
     try:
       inst_id = fh.readline().strip()
       memory = utils.TryConvert(int, fh.readline().strip())
-      vcpus = utils.TryConvert(fh.readline().strip())
+      vcpus = utils.TryConvert(int, fh.readline().strip())
       stat = "---b-"
       times = "0"
       return (instance_name, inst_id, memory, vcpus, stat, times)
@@ -106,6 +107,44 @@ class FakeHypervisor(hv_base.BaseHypervisor):
       raise errors.HypervisorError("Failed to list instances: %s" % err)
     return data
 
+  def _InstanceFile(self, instance_name):
+    """Compute the instance file for an instance name.
+
+    """
+    return self._ROOT_DIR + "/%s" % instance_name
+
+  def _IsAlive(self, instance_name):
+    """Checks if an instance is alive.
+
+    """
+    file_name = self._InstanceFile(instance_name)
+    return os.path.exists(file_name)
+
+  def _MarkUp(self, instance):
+    """Mark the instance as running.
+
+    This does no checks, which should be done by its callers.
+
+    """
+    file_name = self._InstanceFile(instance.name)
+    fh = file(file_name, "w")
+    try:
+      fh.write("0\n%d\n%d\n" %
+               (instance.beparams[constants.BE_MEMORY],
+                instance.beparams[constants.BE_VCPUS]))
+    finally:
+      fh.close()
+
+  def _MarkDown(self, instance):
+    """Mark the instance as down.
+
+    This does no checks, which should be done by its callers.
+
+    """
+    file_name = self._InstanceFile(instance.name)
+    utils.RemoveFile(file_name)
+
   def StartInstance(self, instance, block_devices):
     """Start an instance.
 
@@ -114,18 +153,11 @@ class FakeHypervisor(hv_base.BaseHypervisor):
     handle race conditions properly, since these are *FAKE* instances.
 
     """
-    file_name = self._ROOT_DIR + "/%s" % instance.name
-    if os.path.exists(file_name):
+    if self._IsAlive(instance.name):
       raise errors.HypervisorError("Failed to start instance %s: %s" %
                                    (instance.name, "already running"))
     try:
-      fh = file(file_name, "w")
-      try:
-        fh.write("0\n%d\n%d\n" %
-                 (instance.beparams[constants.BE_MEMORY],
-                  instance.beparams[constants.BE_VCPUS]))
-      finally:
-        fh.close()
+      self._MarkUp(instance)
     except IOError, err:
       raise errors.HypervisorError("Failed to start instance %s: %s" %
                                    (instance.name, err))
@@ -137,11 +169,10 @@ class FakeHypervisor(hv_base.BaseHypervisor):
     dir, if it exists, otherwise we raise an exception.
 
     """
-    file_name = self._ROOT_DIR + "/%s" % instance.name
-    if not os.path.exists(file_name):
+    if not self._IsAlive(instance.name):
       raise errors.HypervisorError("Failed to stop instance %s: %s" %
                                    (instance.name, "not running"))
-    utils.RemoveFile(file_name)
+    self._MarkDown(instance)
 
   def RebootInstance(self, instance):
     """Reboot an instance.
 
@@ -192,3 +223,48 @@ class FakeHypervisor(hv_base.BaseHypervisor):
 
     """
     cls.LinuxPowercycle()
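The new helpers above reduce the fake hypervisor's whole notion of instance
state to the presence of a marker file, with the liveness checks left to
callers such as StartInstance and StopInstance. A self-contained sketch of
the same protocol (the directory is a stand-in for the class' _ROOT_DIR):

  import errno
  import os

  ROOT_DIR = "/tmp/fake-hv"  # stand-in for FakeHypervisor._ROOT_DIR

  def InstanceFile(name):
    return os.path.join(ROOT_DIR, name)

  def IsAlive(name):
    # an instance is "running" exactly while its marker file exists
    return os.path.exists(InstanceFile(name))

  def MarkUp(name, memory, vcpus):
    # creating the marker file starts the "instance"; no check done here
    fh = open(InstanceFile(name), "w")
    try:
      fh.write("0\n%d\n%d\n" % (memory, vcpus))
    finally:
      fh.close()

  def MarkDown(name):
    # removing the marker file stops it; tolerate an already-missing file
    try:
      os.remove(InstanceFile(name))
    except OSError, err:
      if err.errno != errno.ENOENT:
        raise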
+
+  def AcceptInstance(self, instance, info, target):
+    """Prepare to accept an instance.
+
+    @type instance: L{objects.Instance}
+    @param instance: instance to be accepted
+    @type info: string
+    @param info: instance info, not used
+    @type target: string
+    @param target: target host (usually ip), on this node
+
+    """
+    if self._IsAlive(instance.name):
+      raise errors.HypervisorError("Can't accept instance, already running")
+
+  def MigrateInstance(self, instance, target, live):
+    """Migrate an instance.
+
+    @type instance: L{objects.Instance}
+    @param instance: the instance to be migrated
+    @type target: string
+    @param target: hostname (usually ip) of the target node
+    @type live: boolean
+    @param live: whether to do a live or non-live migration
+
+    """
+    logging.debug("Fake hypervisor migrating %s to %s (live=%s)",
+                  instance, target, live)
+
+    self._MarkDown(instance)
+
+  def FinalizeMigration(self, instance, info, success):
+    """Finalize an instance migration.
+
+    For the fake hv, this just marks the instance up.
+
+    @type instance: L{objects.Instance}
+    @param instance: instance whose migration is being finalized
+
+    """
+    if success:
+      self._MarkUp(instance)
+    else:
+      # ensure it's down
+      self._MarkDown(instance)
diff --git a/lib/hypervisor/hv_kvm.py b/lib/hypervisor/hv_kvm.py
index a153d36..92a2854 100644
--- a/lib/hypervisor/hv_kvm.py
+++ b/lib/hypervisor/hv_kvm.py
@@ -74,6 +74,8 @@ class KVMHypervisor(hv_base.BaseHypervisor):
       hv_base.ParamInSet(False, constants.HT_KVM_VALID_MOUSE_TYPES),
     constants.HV_MIGRATION_PORT: hv_base.NET_PORT_CHECK,
     constants.HV_USE_LOCALTIME: hv_base.NO_CHECK,
+    constants.HV_DISK_CACHE:
+      hv_base.ParamInSet(True, constants.HT_VALID_CACHE_TYPES),
     }
 
   _MIGRATION_STATUS_RE = re.compile('Migration\s+status:\s+(\w+)',
@@ -92,16 +94,32 @@ class KVMHypervisor(hv_base.BaseHypervisor):
     dirs = [(dname, constants.RUN_DIRS_MODE) for dname in self._DIRS]
     utils.EnsureDirs(dirs)
 
+  def _InstancePidFile(self, instance_name):
+    """Returns the instance pidfile.
+
+    """
+    pidfile = "%s/%s" % (self._PIDS_DIR, instance_name)
+    return pidfile
+
   def _InstancePidAlive(self, instance_name):
     """Returns the instance pid and pidfile
 
     """
-    pidfile = "%s/%s" % (self._PIDS_DIR, instance_name)
+    pidfile = self._InstancePidFile(instance_name)
     pid = utils.ReadPidFile(pidfile)
     alive = utils.IsProcessAlive(pid)
 
     return (pidfile, pid, alive)
 
+  def _CheckDown(self, instance_name):
+    """Raises an error unless the given instance is down.
+
+    """
+    alive = self._InstancePidAlive(instance_name)[2]
+    if alive:
+      raise errors.HypervisorError("Failed to start instance %s: %s" %
+                                   (instance_name, "already running"))
+
   @classmethod
   def _InstanceMonitor(cls, instance_name):
     """Returns the instance monitor socket name
 
@@ -183,6 +201,8 @@ class KVMHypervisor(hv_base.BaseHypervisor):
       script.write("  # Connect the interface to the bridge\n")
       script.write("  /usr/sbin/brctl addif $BRIDGE $INTERFACE\n")
     elif nic.nicparams[constants.NIC_MODE] == constants.NIC_MODE_ROUTED:
+      if not nic.ip:
+        raise errors.HypervisorError("nic/%d is routed, but has no ip."
+                                     % seq)
       script.write("  # Route traffic targeted at the IP to the interface\n")
       if nic.nicparams[constants.NIC_LINK]:
         script.write("  while /sbin/ip rule del dev $INTERFACE; do :; done\n")
@@ -236,7 +256,7 @@ class KVMHypervisor(hv_base.BaseHypervisor):
     @return: tuple (name, id, memory, vcpus, stat, times)
 
     """
-    pidfile, pid, alive = self._InstancePidAlive(instance_name)
+    _, pid, alive = self._InstancePidAlive(instance_name)
     if not alive:
       return None
 
@@ -274,7 +294,7 @@ class KVMHypervisor(hv_base.BaseHypervisor):
       if utils.IsProcessAlive(utils.ReadPidFile(filename)):
         try:
           info = self.GetInstanceInfo(name)
-        except errors.HypervisorError, err:
+        except errors.HypervisorError:
           continue
         if info:
           data.append(info)
@@ -285,7 +305,7 @@ class KVMHypervisor(hv_base.BaseHypervisor):
     """Generate KVM information to start an instance.
 
     """
-    pidfile, pid, alive = self._InstancePidAlive(instance.name)
+    pidfile = self._InstancePidFile(instance.name)
     kvm = constants.KVM_PATH
     kvm_cmd = [kvm]
     kvm_cmd.extend(['-m', instance.beparams[constants.BE_MEMORY]])
@@ -310,6 +330,12 @@ class KVMHypervisor(hv_base.BaseHypervisor):
       if_val = ',if=virtio'
     else:
       if_val = ',if=%s' % disk_type
+    # Cache mode
+    disk_cache = hvp[constants.HV_DISK_CACHE]
+    if disk_cache != constants.HT_CACHE_DEFAULT:
+      cache_val = ",cache=%s" % disk_cache
+    else:
+      cache_val = ""
     for cfdev, dev_path in block_devices:
       if cfdev.mode != constants.DISK_RDWR:
         raise errors.HypervisorError("Instance has read-only disks which"
@@ -323,7 +349,8 @@ class KVMHypervisor(hv_base.BaseHypervisor):
       else:
         boot_val = ''
 
-      drive_val = 'file=%s,format=raw%s%s' % (dev_path, if_val, boot_val)
+      drive_val = 'file=%s,format=raw%s%s%s' % (dev_path, if_val, boot_val,
+                                                cache_val)
       kvm_cmd.extend(['-drive', drive_val])
 
     iso_image = hvp[constants.HV_CDROM_IMAGE_PATH]
@@ -365,9 +392,8 @@ class KVMHypervisor(hv_base.BaseHypervisor):
         vnc_arg = '%s:%d' % (vnc_bind_address, display)
       else:
         logging.error("Network port is not a valid VNC display (%d < %d)."
-                      " Not starting VNC" %
-                      (instance.network_port,
-                       constants.VNC_BASE_PORT))
+                      " Not starting VNC", instance.network_port,
+                      constants.VNC_BASE_PORT)
         vnc_arg = 'none'
 
     # Only allow tls and other option when not binding to a file, for now.
@@ -460,11 +486,9 @@ class KVMHypervisor(hv_base.BaseHypervisor):
     @param incoming: (target_host_ip, port)
 
     """
-    pidfile, pid, alive = self._InstancePidAlive(instance.name)
     hvp = instance.hvparams
-    if alive:
-      raise errors.HypervisorError("Failed to start instance %s: %s" %
-                                   (instance.name, "already running"))
+    name = instance.name
+    self._CheckDown(name)
 
     temp_files = []
 
@@ -502,12 +526,10 @@ class KVMHypervisor(hv_base.BaseHypervisor):
     result = utils.RunCmd(kvm_cmd)
     if result.failed:
       raise errors.HypervisorError("Failed to start instance %s: %s (%s)" %
-                                   (instance.name, result.fail_reason,
-                                    result.output))
+                                   (name, result.fail_reason, result.output))
 
-    if not utils.IsProcessAlive(utils.ReadPidFile(pidfile)):
-      raise errors.HypervisorError("Failed to start instance %s" %
-                                   (instance.name))
+    if not self._InstancePidAlive(name)[2]:
+      raise errors.HypervisorError("Failed to start instance %s" % name)
 
     if vnc_pwd:
       change_cmd = 'change vnc password %s' % vnc_pwd
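Since cache_val stays empty for the "default" cache mode, existing
configurations keep producing exactly the old -drive string; for any other
mode the clause is appended at the end. With made-up example values, the
generated argument looks like this:

  dev_path = "/dev/drbd0"         # example device path
  if_val = ",if=virtio"           # derived from HV_DISK_TYPE
  boot_val = ",boot=on"           # set for the boot disk
  cache_val = ",cache=writeback"  # from HV_DISK_CACHE, unless "default"
  drive_val = 'file=%s,format=raw%s%s%s' % (dev_path, if_val, boot_val,
                                            cache_val)
  # drive_val == 'file=/dev/drbd0,format=raw,if=virtio,boot=on,cache=writeback'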
""" - pidfile, pid, alive = self._InstancePidAlive(instance.name) - if alive: - raise errors.HypervisorError("Failed to start instance %s: %s" % - (instance.name, "already running")) - + self._CheckDown(instance.name) kvm_runtime = self._GenerateKVMRuntime(instance, block_devices) self._SaveKVMRuntime(instance, kvm_runtime) self._ExecuteKVMRuntime(instance, kvm_runtime) @@ -571,7 +589,7 @@ class KVMHypervisor(hv_base.BaseHypervisor): # For some reason if we do a 'send-key ctrl-alt-delete' to the control # socket the instance will stop, but now power up again. So we'll resort # to shutdown and restart. - pidfile, pid, alive = self._InstancePidAlive(instance.name) + _, _, alive = self._InstancePidAlive(instance.name) if not alive: raise errors.HypervisorError("Failed to reboot instance %s:" " not running" % instance.name) @@ -675,7 +693,7 @@ class KVMHypervisor(hv_base.BaseHypervisor): raise errors.HypervisorError("Migration %s at the kvm level" % status) else: - logging.info("KVM: unknown migration status '%s'" % status) + logging.info("KVM: unknown migration status '%s'", status) time.sleep(2) utils.KillProcess(pid) diff --git a/lib/hypervisor/hv_xen.py b/lib/hypervisor/hv_xen.py index 6352893..2eb2cc5 100644 --- a/lib/hypervisor/hv_xen.py +++ b/lib/hypervisor/hv_xen.py @@ -23,8 +23,6 @@ """ -import os -import os.path import logging from cStringIO import StringIO @@ -306,13 +304,12 @@ class XenHypervisor(hv_base.BaseHypervisor): return "'xm info' failed: %s, %s" % (result.fail_reason, result.output) @staticmethod - def _GetConfigFileDiskData(disk_template, block_devices): + def _GetConfigFileDiskData(block_devices): """Get disk directive for xen config file. This method builds the xen config disk directive according to the given disk_template and block_devices. - @param disk_template: string containing instance disk template @param block_devices: list of tuples (cfdev, rldev): - cfdev: dict containing ganeti config disk part - rldev: ganeti.bdev.BlockDev object @@ -503,10 +500,10 @@ class XenPvmHypervisor(XenHypervisor): if nic.nicparams[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED: nic_str += ", bridge=%s" % nic.nicparams[constants.NIC_LINK] + disk_data = cls._GetConfigFileDiskData(block_devices) + config.write("vif = [%s]\n" % ",".join(vif_data)) - config.write("disk = [%s]\n" % ",".join( - cls._GetConfigFileDiskData(instance.disk_template, - block_devices))) + config.write("disk = [%s]\n" % ",".join(disk_data)) config.write("root = '%s'\n" % hvp[constants.HV_ROOT_PATH]) config.write("on_poweroff = 'destroy'\n") @@ -632,8 +629,7 @@ class XenHvmHypervisor(XenHypervisor): nic_str += ", bridge=%s" % nic.nicparams[constants.NIC_LINK] config.write("vif = [%s]\n" % ",".join(vif_data)) - disk_data = cls._GetConfigFileDiskData(instance.disk_template, - block_devices) + disk_data = cls._GetConfigFileDiskData(block_devices) disk_type = hvp[constants.HV_DISK_TYPE] if disk_type in (None, constants.HT_DISK_IOEMU): replacement = ",ioemu:hd" diff --git a/lib/jqueue.py b/lib/jqueue.py index 13f112f..2c2345b 100644 --- a/lib/jqueue.py +++ b/lib/jqueue.py @@ -154,6 +154,7 @@ class _QueuedJob(object): @ivar change: a Condition variable we use for waiting for job changes """ + # pylint: disable-msg=W0212 __slots__ = ["queue", "id", "ops", "log_serial", "received_timestamp", "start_timestamp", "end_timestamp", "lock_status", "change", @@ -419,7 +420,7 @@ class _JobQueueWorker(workerpool.BaseWorker): """The actual job workers. 
""" - def RunTask(self, job): + def RunTask(self, job): # pylint: disable-msg=W0221 """Job executor. This functions processes a job. It is closely tied to the _QueuedJob and @@ -559,6 +560,7 @@ def _RequireOpenQueue(fn): """ def wrapper(self, *args, **kwargs): + # pylint: disable-msg=W0212 assert self._queue_lock is not None, "Queue should be open" return fn(self, *args, **kwargs) return wrapper @@ -723,7 +725,8 @@ class JobQueue(object): except KeyError: pass - def _CheckRpcResult(self, result, nodes, failmsg): + @staticmethod + def _CheckRpcResult(result, nodes, failmsg): """Verifies the status of an RPC call. Since we aim to keep consistency should this node (the current @@ -744,8 +747,8 @@ class JobQueue(object): msg = result[node].fail_msg if msg: failed.append(node) - logging.error("RPC call %s failed on node %s: %s", - result[node].call, node, msg) + logging.error("RPC call %s (%s) failed on node %s: %s", + result[node].call, failmsg, node, msg) else: success.append(node) @@ -804,7 +807,8 @@ class JobQueue(object): result = rpc.RpcRunner.call_jobqueue_rename(names, addrs, rename) self._CheckRpcResult(result, self._nodes, "Renaming files (%r)" % rename) - def _FormatJobID(self, job_id): + @staticmethod + def _FormatJobID(job_id): """Convert a job ID to string format. Currently this just does C{str(job_id)} after performing some @@ -919,6 +923,7 @@ class JobQueue(object): @return: the list of job IDs """ + # pylint: disable-msg=W0613 jlist = [self._ExtractJobID(name) for name in self._ListJobFiles()] jlist = utils.NiceSort(jlist) return jlist @@ -963,7 +968,7 @@ class JobQueue(object): try: job = _QueuedJob.Restore(self, data) - except Exception, err: + except Exception, err: # pylint: disable-msg=W0703 new_path = self._GetArchivedJobPath(job_id) if filepath == new_path: # job already archived (future case) @@ -1342,7 +1347,8 @@ class JobQueue(object): return (archived_count, len(all_job_ids) - last_touched - 1) - def _GetJobInfoUnlocked(self, job, fields): + @staticmethod + def _GetJobInfoUnlocked(job, fields): """Returns information about a job. @type job: L{_QueuedJob} diff --git a/lib/jstore.py b/lib/jstore.py index f12f704..723cda1 100644 --- a/lib/jstore.py +++ b/lib/jstore.py @@ -130,11 +130,12 @@ def InitAndVerifyQueue(must_lock): if serial is None: # There must be a serious problem - raise errors.JobQueueError("Can't read/parse the job queue serial file") + raise errors.JobQueueError("Can't read/parse the job queue" + " serial file") if not must_lock: - # There's no need for more error handling. Closing the lock file below - # in case of an error will unlock it anyway. + # There's no need for more error handling. Closing the lock + # file below in case of an error will unlock it anyway. queue_lock.Unlock() except: diff --git a/lib/locking.py b/lib/locking.py index 08399a1..6ba74f2 100644 --- a/lib/locking.py +++ b/lib/locking.py @@ -18,11 +18,13 @@ # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301, USA. -# Disable "Invalid name ..." message -# pylint: disable-msg=C0103 - """Module implementing the Ganeti locking code.""" +# pylint: disable-msg=W0212 + +# W0212 since e.g. LockSet methods use (a lot) the internals of +# SharedLock + import os import select import threading @@ -285,7 +287,7 @@ class SingleNotifyPipeCondition(_BaseCondition): if self._nwaiters == 0: self._Cleanup() - def notifyAll(self): + def notifyAll(self): # pylint: disable-msg=C0103 """Close the writing side of the pipe to notify all waiters. 
""" @@ -343,7 +345,7 @@ class PipeCondition(_BaseCondition): assert self._nwaiters > 0 self._nwaiters -= 1 - def notifyAll(self): + def notifyAll(self): # pylint: disable-msg=C0103 """Notify all currently waiting threads. """ @@ -380,7 +382,7 @@ class _CountingCondition(object): self._cond = threading.Condition(lock=lock) self._nwaiters = 0 - def notifyAll(self): + def notifyAll(self): # pylint: disable-msg=C0103 """Notifies the condition. """ @@ -1202,7 +1204,7 @@ class GanetiLockManager: # the test cases. return utils.any((self._is_owned(l) for l in LEVELS[level + 1:])) - def _BGL_owned(self): + def _BGL_owned(self): # pylint: disable-msg=C0103 """Check if the current thread owns the BGL. Both an exclusive or a shared acquisition work. @@ -1210,7 +1212,8 @@ class GanetiLockManager: """ return BGL in self.__keyring[LEVEL_CLUSTER]._list_owned() - def _contains_BGL(self, level, names): + @staticmethod + def _contains_BGL(level, names): # pylint: disable-msg=C0103 """Check if the level contains the BGL. Check if acting on the given level and set of names will change diff --git a/lib/luxi.py b/lib/luxi.py index 753b005..f062816 100644 --- a/lib/luxi.py +++ b/lib/luxi.py @@ -56,6 +56,7 @@ REQ_QUERY_NODES = "QueryNodes" REQ_QUERY_EXPORTS = "QueryExports" REQ_QUERY_CONFIG_VALUES = "QueryConfigValues" REQ_QUERY_CLUSTER_INFO = "QueryClusterInfo" +REQ_QUERY_TAGS = "QueryTags" REQ_QUEUE_SET_DRAIN_FLAG = "SetDrainFlag" REQ_SET_WATCHER_PAUSE = "SetWatcherPause" @@ -286,7 +287,7 @@ class Client(object): old_transp = self.transport self.transport = None old_transp.Close() - except Exception: + except Exception: # pylint: disable-msg=W0703 pass def CallMethod(self, method, args): @@ -384,5 +385,8 @@ class Client(object): def QueryConfigValues(self, fields): return self.CallMethod(REQ_QUERY_CONFIG_VALUES, fields) + def QueryTags(self, kind, name): + return self.CallMethod(REQ_QUERY_TAGS, (kind, name)) + # TODO: class Server(object) diff --git a/lib/mcpu.py b/lib/mcpu.py index 0713aee..af4ff1a 100644 --- a/lib/mcpu.py +++ b/lib/mcpu.py @@ -50,7 +50,6 @@ def _CalculateLockAttemptTimeouts(): """Calculate timeouts for lock attempts. """ - running_sum = 0 result = [1.0] # Wait for a total of at least 150s before doing a blocking acquire @@ -137,7 +136,7 @@ class _LockAttemptTimeoutStrategy(object): return timeout -class OpExecCbBase: +class OpExecCbBase: # pylint: disable-msg=W0232 """Base class for OpCode execution callbacks. """ diff --git a/lib/objects.py b/lib/objects.py index 9d04936..314606d 100644 --- a/lib/objects.py +++ b/lib/objects.py @@ -26,11 +26,12 @@ pass to and from external parties. """ -# pylint: disable-msg=E0203 +# pylint: disable-msg=E0203,W0201 # E0203: Access to member %r before its definition, since we use # objects.py which doesn't explicitely initialise its members +# W0201: Attribute '%s' defined outside __init__ import ConfigParser import re @@ -47,7 +48,7 @@ __all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance", _TIMESTAMPS = ["ctime", "mtime"] _UUID = ["uuid"] -def FillDict(defaults_dict, custom_dict, skip_keys=[]): +def FillDict(defaults_dict, custom_dict, skip_keys=None): """Basic function to apply settings on top a default dict. 
   @type defaults_dict: dict
@@ -62,11 +63,12 @@ def FillDict(defaults_dict, custom_dict, skip_keys=[]):
   """
   ret_dict = copy.deepcopy(defaults_dict)
   ret_dict.update(custom_dict)
-  for k in skip_keys:
-    try:
-      del ret_dict[k]
-    except KeyError:
-      pass
+  if skip_keys:
+    for k in skip_keys:
+      try:
+        del ret_dict[k]
+      except KeyError:
+        pass
   return ret_dict
 
@@ -153,7 +155,7 @@ class ConfigObject(object):
       raise errors.ConfigurationError("Invalid object passed to FromDict:"
                                       " expected dict, got %s" % type(val))
     val_str = dict([(str(k), v) for k, v in val.iteritems()])
-    obj = cls(**val_str)
+    obj = cls(**val_str) # pylint: disable-msg=W0142
     return obj
 
   @staticmethod
@@ -853,6 +855,8 @@ class Cluster(TaggableObject):
     """Fill defaults for missing configuration values.
 
     """
+    # pylint: disable-msg=E0203
+    # because these are "defined" via slots, not manually
     if self.hvparams is None:
       self.hvparams = constants.HVC_DEFAULTS
     else:
diff --git a/lib/opcodes.py b/lib/opcodes.py
index 0244144..80c802d 100644
--- a/lib/opcodes.py
+++ b/lib/opcodes.py
@@ -447,7 +447,7 @@ class OpCreateInstance(OpCode):
     "pnode", "disk_template", "snode", "mode",
     "disks", "nics",
     "src_node", "src_path", "start",
-    "wait_for_sync", "ip_check",
+    "wait_for_sync", "ip_check", "name_check",
     "file_storage_dir", "file_driver",
     "iallocator",
     "hypervisor", "hvparams", "beparams",
diff --git a/lib/rapi/__init__.py b/lib/rapi/__init__.py
index fde366c..28d6ead 100644
--- a/lib/rapi/__init__.py
+++ b/lib/rapi/__init__.py
@@ -18,5 +18,6 @@
 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 # 02110-1301, USA.
 
+"""Ganeti RAPI module"""
 
 RAPI_ACCESS_WRITE = "write"
diff --git a/lib/rapi/baserlib.py b/lib/rapi/baserlib.py
index 30d9cbf..020d7bc 100644
--- a/lib/rapi/baserlib.py
+++ b/lib/rapi/baserlib.py
@@ -23,6 +23,10 @@
 
 """
 
+# pylint: disable-msg=C0103
+
+# C0103: Invalid name, since the R_* names are not conforming
+
 import logging
 
 from ganeti import luxi
@@ -197,12 +201,14 @@ def GetClient():
     raise http.HttpBadGateway("Master seems to be unreachable: %s" % str(err))
 
 
-def FeedbackFn(ts, log_type, log_msg):
+def FeedbackFn(ts, log_type, log_msg): # pylint: disable-msg=W0613
   """Feedback logging function for http case.
 
   We don't have a stdout for printing log messages, so log them to the
   http log at least.
 
+  @param ts: the timestamp (unused)
+
   """
   logging.info("%s: %s", log_type, log_msg)
diff --git a/lib/rapi/connector.py b/lib/rapi/connector.py
index 8c20640..3ef842b 100644
--- a/lib/rapi/connector.py
+++ b/lib/rapi/connector.py
@@ -22,6 +22,10 @@
 
 """
 
+# pylint: disable-msg=C0103
+
+# C0103: Invalid name, since the R_* names are not conforming
+
 import cgi
 import re
 
@@ -42,12 +46,14 @@ class Mapper:
   """Map resource to method.
 
   """
-  def __init__(self, connector=CONNECTOR):
+  def __init__(self, connector=None):
     """Resource mapper constructor.
 
     @param connector: a dictionary, mapping method name with URL path regexp
 
     """
+    if connector is None:
+      connector = CONNECTOR
     self._connector = connector
 
   def getController(self, uri):
@@ -95,7 +101,8 @@ class R_root(baserlib.R_Generic):
   """/ resource.
 
   """
-  def GET(self):
+  @staticmethod
+  def GET():
     """Show the list of mapped resources.
 
     @return: a dictionary with 'name' and 'uri' keys for each of them.
@@ -138,7 +145,8 @@ class R_2(baserlib.R_Generic):
   """ /2 resource, the root of the version 2 API.
 
   """
-  def GET(self):
+  @staticmethod
+  def GET():
     """Show the list of mapped resources.
 
     @return: a dictionary with 'name' and 'uri' keys for each of them.
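The FillDict change earlier in this hunk (skip_keys=[] becoming
skip_keys=None) and the Mapper change above (connector=CONNECTOR becoming
connector=None) fix the same Python pitfall: default argument values are
evaluated once, at definition time, and then shared between calls. A
minimal demonstration of the problem and of the None-sentinel idiom used
as the fix:

  def Buggy(acc=[]):
    # the single default list is shared across all calls
    acc.append(1)
    return acc

  Buggy()  # -> [1]
  Buggy()  # -> [1, 1], state leaked from the first call

  def Fixed(acc=None):
    # the idiom adopted by FillDict and Mapper above
    if acc is None:
      acc = []
    acc.append(1)
    return acc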
diff --git a/lib/rapi/rlib2.py b/lib/rapi/rlib2.py
index 669bae4..f475277 100644
--- a/lib/rapi/rlib2.py
+++ b/lib/rapi/rlib2.py
@@ -23,6 +23,10 @@
 
 """
 
+# pylint: disable-msg=C0103
+
+# C0103: Invalid name, since the R_* names are not conforming
+
 from ganeti import opcodes
 from ganeti import http
 from ganeti import constants
@@ -73,7 +77,8 @@ class R_version(baserlib.R_Generic):
   to adapt clients accordingly.
 
   """
-  def GET(self):
+  @staticmethod
+  def GET():
     """Returns the remote API version.
 
     """
@@ -84,7 +89,8 @@ class R_2_info(baserlib.R_Generic):
   """Cluster info.
 
   """
-  def GET(self):
+  @staticmethod
+  def GET():
     """Returns cluster information.
 
     """
@@ -96,7 +102,8 @@ class R_2_os(baserlib.R_Generic):
   """/2/os resource.
 
   """
-  def GET(self):
+  @staticmethod
+  def GET():
     """Return a list of all OSes.
 
     Can return error 500 in case of a problem.
@@ -127,7 +134,8 @@ class R_2_redist_config(baserlib.R_Generic):
   """/2/redistribute-config resource.
 
   """
-  def PUT(self):
+  @staticmethod
+  def PUT():
     """Redistribute configuration to all nodes.
 
     """
@@ -138,7 +146,8 @@ class R_2_jobs(baserlib.R_Generic):
   """/2/jobs resource.
 
   """
-  def GET(self):
+  @staticmethod
+  def GET():
     """Returns a dictionary of jobs.
 
     @return: a dictionary with jobs id and uri.
@@ -464,6 +473,7 @@ class R_2_instances(baserlib.R_Generic):
       nics=nics,
       start=fn('start', True),
       ip_check=fn('ip_check', True),
+      name_check=fn('name_check', True),
       wait_for_sync=True,
       hypervisor=fn('hypervisor', None),
      hvparams=hvparams,
@@ -667,6 +677,7 @@ class _R_Tags(baserlib.R_Generic):
     Example: ["tag1", "tag2", "tag3"]
 
     """
+    # pylint: disable-msg=W0212
     return baserlib._Tags_GET(self.TAG_LEVEL, name=self.name)
 
   def PUT(self):
@@ -676,6 +687,7 @@ class _R_Tags(baserlib.R_Generic):
     you'll have back a job id.
 
     """
+    # pylint: disable-msg=W0212
     if 'tag' not in self.queryargs:
       raise http.HttpBadRequest("Please specify tag(s) to add using"
                                 " the 'tag' parameter")
@@ -691,6 +703,7 @@ class _R_Tags(baserlib.R_Generic):
     /tags?tag=[tag]&tag=[tag]
 
     """
+    # pylint: disable-msg=W0212
     if 'tag' not in self.queryargs:
       # no, we're not going to delete all tags
       raise http.HttpBadRequest("Cannot delete all tags - please specify"
diff --git a/lib/rpc.py b/lib/rpc.py
index f354a41..69e692c 100644
--- a/lib/rpc.py
+++ b/lib/rpc.py
@@ -42,7 +42,8 @@ from ganeti import serializer
 from ganeti import constants
 from ganeti import errors
 
-import ganeti.http.client
+# pylint has a bug here, doesn't see this import
+import ganeti.http.client # pylint: disable-msg=W0611
 
 
 # Module level variable
@@ -55,7 +56,7 @@ def Init():
   Must be called before using any RPC function.
 
   """
-  global _http_manager
+  global _http_manager # pylint: disable-msg=W0603
 
   assert not _http_manager, "RPC module initialized more than once"
 
@@ -70,7 +71,7 @@ def Shutdown():
   Must be called before quitting the program.
 
   """
-  global _http_manager
+  global _http_manager # pylint: disable-msg=W0603
 
   if _http_manager:
     _http_manager.Shutdown()
@@ -162,7 +163,7 @@ class RpcResult(object):
       args = (msg, prereq)
     else:
       args = (msg, )
-    raise ec(*args)
+    raise ec(*args) # pylint: disable-msg=W0142
 
 
 class Client:
diff --git a/lib/serializer.py b/lib/serializer.py
index 43dfaa7..b568497 100644
--- a/lib/serializer.py
+++ b/lib/serializer.py
@@ -24,6 +24,10 @@
 This module introduces a simple abstraction over the serialization
 backend (currently json).
""" +# pylint: disable-msg=C0103 + +# C0103: Invalid name, since pylint doesn't see that Dump points to a +# function and not a constant import simplejson import re diff --git a/lib/ssh.py b/lib/ssh.py index 87c25c9..31fbdbb 100644 --- a/lib/ssh.py +++ b/lib/ssh.py @@ -212,7 +212,7 @@ class SshRunner: - detail: string with details """ - retval = self.Run(node, 'root', 'hostname') + retval = self.Run(node, 'root', 'hostname --fqdn') if retval.failed: msg = "ssh problem" diff --git a/lib/storage.py b/lib/storage.py index b4b303a..5d30396 100644 --- a/lib/storage.py +++ b/lib/storage.py @@ -23,6 +23,14 @@ """ +# pylint: disable-msg=W0232,R0201 + +# W0232, since we use these as singletons rather than object holding +# data + +# R0201, for the same reason + +# TODO: FileStorage initialised with paths whereas the others not import logging @@ -50,7 +58,7 @@ class _Base: """ raise NotImplementedError() - def Modify(self, name, changes): + def Modify(self, name, changes): # pylint: disable-msg=W0613 """Modifies an entity within the storage unit. @type name: string @@ -76,7 +84,7 @@ class _Base: raise NotImplementedError() -class FileStorage(_Base): +class FileStorage(_Base): # pylint: disable-msg=W0223 """File storage unit. """ @@ -153,7 +161,7 @@ class FileStorage(_Base): return values -class _LvmBase(_Base): +class _LvmBase(_Base): # pylint: disable-msg=W0223 """Base class for LVM storage containers. @cvar LIST_FIELDS: list of tuples consisting of three elements: SF_* @@ -248,7 +256,7 @@ class _LvmBase(_Base): if callable(mapper): # we got a function, call it with all the declared fields - val = mapper(*values) + val = mapper(*values) # pylint: disable-msg=W0142 elif len(values) == 1: # we don't have a function, but we had a single field # declared, pass it unchanged @@ -324,7 +332,7 @@ class _LvmBase(_Base): yield fields -class LvmPvStorage(_LvmBase): +class LvmPvStorage(_LvmBase): # pylint: disable-msg=W0223 """LVM Physical Volume storage unit. """ diff --git a/lib/utils.py b/lib/utils.py index 8f7d557..1abec0e 100644 --- a/lib/utils.py +++ b/lib/utils.py @@ -1226,21 +1226,18 @@ def EnsureDirs(dirs): raise errors.GenericError("%s is not a directory" % dir_name) -def ReadFile(file_name, size=None): +def ReadFile(file_name, size=-1): """Reads a file. - @type size: None or int - @param size: Read at most size bytes + @type size: int + @param size: Read at most size bytes (if negative, entire file) @rtype: str @return: the (possibly partial) content of the file """ f = open(file_name, "r") try: - if size is None: - return f.read() - else: - return f.read(size) + return f.read(size) finally: f.close() @@ -1372,14 +1369,14 @@ def FirstFree(seq, base=0): return None -def all(seq, pred=bool): +def all(seq, pred=bool): # pylint: disable-msg=W0622 "Returns True if pred(x) is True for every element in the iterable" for _ in itertools.ifilterfalse(pred, seq): return False return True -def any(seq, pred=bool): +def any(seq, pred=bool): # pylint: disable-msg=W0622 "Returns True if pred(x) is True for at least one element in the iterable" for _ in itertools.ifilter(pred, seq): return True @@ -1401,20 +1398,26 @@ def UniqueSequence(seq): return [i for i in seq if i not in seen and not seen.add(i)] -def IsValidMac(mac): - """Predicate to check if a MAC address is valid. +def NormalizeAndValidateMac(mac): + """Normalizes and check if a MAC address is valid. Checks whether the supplied MAC address is formally correct, only - accepts colon separated format. + accepts colon separated format. 
@@ -1496,6 +1499,8 @@ def Daemonize(logfile):
   @return: the value zero
 
   """
+  # pylint: disable-msg=W0212
+  # yes, we really want os._exit
   UMASK = 077
   WORKDIR = "/"
 
@@ -1570,7 +1575,7 @@ def RemovePidFile(name):
   # TODO: we could check here that the file contains our pid
   try:
     RemoveFile(pidfilename)
-  except:
+  except: # pylint: disable-msg=W0702
    pass
 
@@ -1961,6 +1966,7 @@ def LockedMethod(fn):
     logging.debug(*args, **kwargs)
 
   def wrapper(self, *args, **kwargs):
+    # pylint: disable-msg=W0212
     assert hasattr(self, '_lock')
     lock = self._lock
     _LockDebug("Waiting for %s", lock)
@@ -2167,6 +2173,7 @@ def Retry(fn, delay, timeout, args=None, wait_fn=time.sleep,
 
   while True:
     try:
+      # pylint: disable-msg=W0142
       return fn(*args)
     except RetryAgain:
       pass
@@ -2408,7 +2415,8 @@ class SignalHandler(object):
     """
     self.called = False
 
-  def _HandleSignal(self, signum, frame):
+  # we don't care about arguments, but we leave them named for the future
+  def _HandleSignal(self, signum, frame): # pylint: disable-msg=W0613
     """Actual signal handling function.
 
     """
diff --git a/lib/workerpool.py b/lib/workerpool.py
index fa202d0..2d3937a 100644
--- a/lib/workerpool.py
+++ b/lib/workerpool.py
@@ -34,6 +34,7 @@ class BaseWorker(threading.Thread, object):
   Users of a worker pool must override RunTask in a subclass.
 
   """
+  # pylint: disable-msg=W0212
   def __init__(self, pool, worker_id):
     """Constructor for BaseWorker thread.
 
@@ -118,7 +119,7 @@ class BaseWorker(threading.Thread, object):
           self.RunTask(*self._current_task)
           logging.debug("Worker %s: done with task %r",
                         self.worker_id, self._current_task)
-        except:
+        except: # pylint: disable-msg=W0702
           logging.error("Worker %s: Caught unhandled exception",
                         self.worker_id, exc_info=True)
         finally:
@@ -223,7 +224,7 @@ class WorkerPool(object):
 
     """
     for worker in self._workers + self._termworkers:
-      if worker._HasRunningTaskUnlocked():
+      if worker._HasRunningTaskUnlocked(): # pylint: disable-msg=W0212
         return True
     return False
diff --git a/man/gnt-instance.sgml b/man/gnt-instance.sgml
index 81d75b2..d56a242 100644
--- a/man/gnt-instance.sgml
+++ b/man/gnt-instance.sgml
@@ -78,6 +78,10 @@
         -s SIZE
 
+        --no-ip-check
+        --no-name-check
+        --no-start
+
         --net=N:options
         --no-nics
 
@@ -147,6 +151,28 @@
 
+        The --no-ip-check option skips the checks that are
+        done to see if the instance's IP is not already alive
+        (i.e. reachable from the master node).
+
+        The --no-name-check option skips the check for the
+        instance name via the resolver (e.g. in DNS or /etc/hosts,
+        depending on your setup). Since the name check is used to
+        compute the IP address, if you pass this option you must
+        also pass the --no-ip-check option.
+
+        If you don't want the instance to automatically start after
+        creation, this is possible via the --no-start option. This
+        will leave the instance down until a subsequent
+        gnt-instance start command.
+
         The NICs of the instances can be specified via the
         --net option.
         By default, one NIC is created for the instance, with a
         random MAC, and set
 
@@ -521,6 +547,27 @@
 
+        disk_cache
+
+          Valid for the KVM hypervisor.
+
+          The disk cache mode. It can be either
+          default to not pass any cache
+          option to KVM, or one of the KVM cache modes: none
+          (for direct I/O), writethrough (to use the host cache
+          but report completion to the guest only when the host
+          has committed the changes to disk) or writeback (to use
+          the host cache and report completion as soon as the
+          data is in the host cache). Note that there are
+          special considerations for the cache mode depending on
+          the version of KVM used and disk type (always raw file
+          under Ganeti), please refer to the KVM documentation
+          for more details.
+
@@ -749,6 +796,14 @@
 
+        name_check
+
+          Skip the name check for instances;
+          see the description in the add
+          command for details.
+
         file_storage_dir, file_driver
 
           Configuration for the file
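The disk_cache parameter documented above is set like any other KVM
hypervisor parameter at instance creation time; an illustrative invocation
(node, OS and instance names are placeholders, and the exact option set
depends on your cluster) would be:

  gnt-instance add -t drbd -s 10G -o debian-etch -n node1:node2 \
    -H kvm:disk_cache=writeback instance1.example.com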
diff --git a/pylintrc b/pylintrc
index e1de46e..7d42070 100644
--- a/pylintrc
+++ b/pylintrc
@@ -11,7 +11,7 @@ load-plugins =
 
 [REPORTS]
 output-format = colorized
-include-ids = no
+include-ids = yes
 files-output = no
 reports = no
 evaluation = 10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
@@ -19,15 +19,21 @@ comment = yes
 
 [BASIC]
 required-attributes =
-no-docstring-rgx = __.*__
+# disabling docstring checks since we have way too many without (complex
+# inheritance hierarchies)
+#no-docstring-rgx = __.*__
+no-docstring-rgx = .*
 module-rgx = (([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
-const-rgx = ((_{0,2}[A-Z][A-Z0-9_]*)|(__.*__))$
+# added lower-case names
+const-rgx = ((_{0,2}[A-Z][A-Z0-9_]*)|(__.*__)|(_?[a-z_][a-z0-9_]*))$
 class-rgx = _?[A-Z][a-zA-Z0-9]+$
-function-rgx = (_?([A-Z]+[a-z0-9]+([A-Z]+[a-z0-9]*)*)|main)$
-method-rgx = (_{0,2}[A-Z]+[a-z0-9]+([A-Z]+[a-z0-9]*)*|__.*__)$
+# added lower-case names
+function-rgx = (_?([A-Z]+[a-z0-9]+([A-Z]+[a-z0-9]*)*)|main|([a-z_][a-z0-9_]*))$
+# add lower-case names, since derived classes must obey method names
+method-rgx = (_{0,2}[A-Z]+[a-z0-9]+([A-Z]+[a-z0-9]*)*|__.*__|([a-z_][a-z0-9_]*))$
 attr-rgx = [a-z_][a-z0-9_]{1,30}$
 argument-rgx = [a-z_][a-z0-9_]*$
-variable-rgx = (_?([a-z_][a-z0-9_]*)|([A-Z0-9_]+))$
+variable-rgx = (_?([a-z_][a-z0-9_]*)|(_?[A-Z0-9_]+))$
 inlinevar-rgx = [A-Za-z_][A-Za-z0-9_]*$
 good-names = i,j,k,_
 bad-names = foo,bar,baz,toto,tutu,tata
@@ -48,15 +54,16 @@ ignore-iface-methods =
 defining-attr-methods = __init__,__new__,setUp
 
 [DESIGN]
-max-args = 6
-max-locals = 15
-max-returns = 6
-max-branchs = 12
-max-statements = 50
+max-args = 15
+max-locals = 50
+max-returns = 10
+max-branchs = 80
+max-statements = 200
 max-parents = 7
-max-attributes = 7
-min-public-methods = 2
-max-public-methods = 20
+max-attributes = 20
+# zero as struct-like (PODS) classes don't export any methods
+min-public-methods = 0
+max-public-methods = 50
 
 [IMPORTS]
 deprecated-modules = regsub,string,TERMIOS,Bastion,rexec
 int-import-graph =
 
 [FORMAT]
 max-line-length = 80
-max-module-lines = 1000
+max-module-lines = 10000
 indent-string = "  "
 
 [MISCELLANEOUS]
 notes = FIXME,XXX,TODO
@@ -76,3 +83,26 @@ notes = FIXME,XXX,TODO
 min-similarity-lines = 4
 ignore-comments = yes
 ignore-docstrings = yes
+
+[MESSAGES CONTROL]
+
+# Enable only checker(s) with the given id(s). This option conflicts with the
+# disable-checker option
+#enable-checker=
+
+# Enable all checker(s) except those with the given id(s). This option
+# conflicts with the enable-checker option
+#disable-checker=
+disable-checker=similarities
+
+# Enable all messages in the listed categories (IRCWEF).
+#enable-msg-cat=
+
+# Disable all messages in the listed categories (IRCWEF).
+disable-msg-cat=
+
+# Enable the message(s) with the given id(s).
+#enable-msg=
+
+# Disable the message(s) with the given id(s).
+disable-msg=W0511,R0922,W0201
diff --git a/scripts/gnt-backup b/scripts/gnt-backup
index a073356..fd04a92 100755
--- a/scripts/gnt-backup
+++ b/scripts/gnt-backup
@@ -18,18 +18,19 @@
 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 # 02110-1301, USA.
 
+"""Backup related commands"""
 
-# pylint: disable-msg=W0401,W0614
+# pylint: disable-msg=W0401,W0613,W0614,C0103
 # W0401: Wildcard import ganeti.cli
+# W0613: Unused argument, since all functions follow the same API
 # W0614: Unused import %s from wildcard import (since we need cli)
+# C0103: Invalid name gnt-backup
 
 import sys
 
 from ganeti.cli import *
 from ganeti import opcodes
 from ganeti import constants
-from ganeti import errors
-from ganeti import utils
 
 
 _VALUE_TRUE = "true"
@@ -79,7 +80,6 @@ def ExportInstance(opts, args):
   if not isinstance(dlist, list):
     ToStderr("Cannot parse execution results")
     return 1
-  tot_dsk = len(dlist)
   # TODO: handle diskless instances
   if dlist.count(False) == 0:
     # all OK
@@ -117,7 +117,6 @@ def RemoveExport(opts, args):
   @return: the desired exit code
 
   """
-  instance = args[0]
   op = opcodes.OpRemoveExport(instance_name=args[0])
 
   SubmitOpCode(op)
@@ -137,6 +136,7 @@ import_opts = [
   SRC_DIR_OPT,
   SRC_NODE_OPT,
   NOIPCHECK_OPT,
+  NONAMECHECK_OPT,
   IALLOCATOR_OPT,
   FILESTORE_DIR_OPT,
   FILESTORE_DRIVER_OPT,
diff --git a/scripts/gnt-cluster b/scripts/gnt-cluster
index 85a2a12..53bea1a 100755
--- a/scripts/gnt-cluster
+++ b/scripts/gnt-cluster
@@ -18,10 +18,13 @@
 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 # 02110-1301, USA.
 
+"""Cluster related commands"""
 
-# pylint: disable-msg=W0401,W0614
+# pylint: disable-msg=W0401,W0613,W0614,C0103
 # W0401: Wildcard import ganeti.cli
+# W0613: Unused argument, since all functions follow the same API
 # W0614: Unused import %s from wildcard import (since we need cli)
+# C0103: Invalid name gnt-cluster
 
 import sys
 import os.path
@@ -510,7 +513,7 @@ def SetClusterParams(opts, args):
 
   # a list of (name, dict) we can pass directly to dict() (or [])
   hvparams = dict(opts.hvparams)
-  for hv, hv_params in hvparams.iteritems():
+  for hv_params in hvparams.values():
     utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
 
   beparams = opts.beparams
diff --git a/scripts/gnt-debug b/scripts/gnt-debug
index e34b409..6c9d5b7 100755
--- a/scripts/gnt-debug
+++ b/scripts/gnt-debug
@@ -18,10 +18,12 @@
 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 # 02110-1301, USA.
+"""Debugging commands""" -# pylint: disable-msg=W0401,W0614 +# pylint: disable-msg=W0401,W0614,C0103 # W0401: Wildcard import ganeti.cli # W0614: Unused import %s from wildcard import (since we need cli) +# C0103: Invalid name gnt-backup import sys import simplejson @@ -30,7 +32,6 @@ import time from ganeti.cli import * from ganeti import cli from ganeti import opcodes -from ganeti import constants from ganeti import utils from ganeti import errors @@ -77,6 +78,7 @@ def GenericOpCodes(opts, args): ToStdout("Loading...") for job_idx in range(opts.rep_job): for fname in args: + # pylint: disable-msg=W0142 op_data = simplejson.loads(utils.ReadFile(fname)) op_list = [opcodes.OpCode.LoadOpCode(val) for val in op_data] op_list = op_list * opts.rep_op diff --git a/scripts/gnt-instance b/scripts/gnt-instance index 44b8410..56eb510 100755 --- a/scripts/gnt-instance +++ b/scripts/gnt-instance @@ -18,16 +18,17 @@ # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301, USA. +"""Instance related commands""" -# pylint: disable-msg=W0401,W0614 +# pylint: disable-msg=W0401,W0614,C0103 # W0401: Wildcard import ganeti.cli # W0614: Unused import %s from wildcard import (since we need cli) +# C0103: Invalid name gnt-instance import sys import os import itertools import simplejson -import time from cStringIO import StringIO from ganeti.cli import * @@ -75,6 +76,7 @@ def _ExpandMultiNames(mode, names, client=None): @raise errors.OpPrereqError: for invalid input parameters """ + # pylint: disable-msg=W0142 if client is None: client = GetClient() if mode == _SHUTDOWN_CLUSTER: @@ -318,7 +320,6 @@ def AddInstance(opts, args): """ return GenericInstanceCreate(constants.INSTANCE_CREATE, opts, args) - return 0 def BatchCreate(opts, args): @@ -357,6 +358,7 @@ def BatchCreate(opts, args): "nics": None, "start": True, "ip_check": True, + "name_check": True, "hypervisor": None, "hvparams": {}, "file_storage_dir": None, @@ -395,7 +397,7 @@ def BatchCreate(opts, args): json_filename = args[0] try: instance_data = simplejson.loads(utils.ReadFile(json_filename)) - except Exception, err: + except Exception, err: # pylint: disable-msg=W0703 ToStderr("Can't parse the instance definition file: %s" % str(err)) return 1 @@ -453,6 +455,7 @@ def BatchCreate(opts, args): nics=tmp_nics, start=specs['start'], ip_check=specs['ip_check'], + name_check=specs['name_check'], wait_for_sync=True, iallocator=specs['iallocator'], hypervisor=hypervisor, @@ -752,7 +755,6 @@ def ReplaceDisks(opts, args): @return: the desired exit code """ - instance_name = args[0] new_2ndary = opts.dst_node iallocator = opts.iallocator if opts.disks is None: @@ -910,7 +912,7 @@ def ConnectToInstanceConsole(opts, args): finally: ToStderr("Can't run console command %s with arguments:\n'%s'", cmd[0], " ".join(cmd)) - os._exit(1) + os._exit(1) # pylint: disable-msg=W0212 def _FormatLogicalID(dev_type, logical_id): @@ -1280,6 +1282,7 @@ add_opts = [ NET_OPT, NODE_PLACEMENT_OPT, NOIPCHECK_OPT, + NONAMECHECK_OPT, NONICS_OPT, NOSTART_OPT, NWSYNC_OPT, diff --git a/scripts/gnt-job b/scripts/gnt-job index 44ae4b2..2e63cf6 100755 --- a/scripts/gnt-job +++ b/scripts/gnt-job @@ -18,10 +18,13 @@ # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301, USA. 
+"""Job related commands""" -# pylint: disable-msg=W0401,W0614 +# pylint: disable-msg=W0401,W0613,W0614,C0103 # W0401: Wildcard import ganeti.cli +# W0613: Unused argument, since all functions follow the same API # W0614: Unused import %s from wildcard import (since we need cli) +# C0103: Invalid name gnt-job import sys @@ -179,7 +182,7 @@ def CancelJobs(opts, args): client = GetClient() for job_id in args: - (success, msg) = client.CancelJob(job_id) + (_, msg) = client.CancelJob(job_id) ToStdout(msg) # TODO: Different exit value if not all jobs were canceled? diff --git a/scripts/gnt-node b/scripts/gnt-node index 7e3e439..f1cde4f 100755 --- a/scripts/gnt-node +++ b/scripts/gnt-node @@ -18,15 +18,17 @@ # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301, USA. +"""Node related commands""" -# pylint: disable-msg=W0401,W0614 +# pylint: disable-msg=W0401,W0613,W0614,C0103 # W0401: Wildcard import ganeti.cli +# W0613: Unused argument, since all functions follow the same API # W0614: Unused import %s from wildcard import (since we need cli) +# C0103: Invalid name gnt-node import sys from ganeti.cli import * -from ganeti import cli from ganeti import opcodes from ganeti import utils from ganeti import constants @@ -354,8 +356,6 @@ def MigrateNode(opts, args): pinst = utils.NiceSort(pinst) - retcode = 0 - if not force and not AskUser("Migrate instance(s) %s?" % (",".join("'%s'" % name for name in pinst))): return 2 diff --git a/scripts/gnt-os b/scripts/gnt-os index f3667c4..dd916ac 100755 --- a/scripts/gnt-os +++ b/scripts/gnt-os @@ -18,17 +18,19 @@ # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301, USA. +"""OS scripts related commands""" -# pylint: disable-msg=W0401,W0614 +# pylint: disable-msg=W0401,W0613,W0614,C0103 # W0401: Wildcard import ganeti.cli +# W0613: Unused argument, since all functions follow the same API # W0614: Unused import %s from wildcard import (since we need cli) +# C0103: Invalid name gnt-os import sys from ganeti.cli import * from ganeti import opcodes from ganeti import utils -from ganeti import constants def ListOS(opts, args): @@ -104,7 +106,7 @@ def DiagnoseOS(opts, args): has_bad = False - for os_name, os_valid, os_variants, node_data in result: + for os_name, _, os_variants, node_data in result: nodes_valid = {} nodes_bad = {} nodes_hidden = {} diff --git a/test/ganeti.constants_unittest.py b/test/ganeti.constants_unittest.py index 219afee..7f1f0cb 100755 --- a/test/ganeti.constants_unittest.py +++ b/test/ganeti.constants_unittest.py @@ -60,6 +60,10 @@ class TestConstants(unittest.TestCase): self.failUnless(constants.LDS_OKAY < constants.LDS_UNKNOWN) self.failUnless(constants.LDS_UNKNOWN < constants.LDS_FAULTY) + def testClockSkew(self): + self.failUnless(constants.NODE_MAX_CLOCK_SKEW < + (0.8 * constants.CONFD_MAX_CLOCK_SKEW)) + class TestParameterNames(unittest.TestCase): """HV/BE parameter tests""" @@ -81,7 +85,7 @@ class TestConfdConstants(unittest.TestCase): def testFourCc(self): self.failUnlessEqual(len(constants.CONFD_MAGIC_FOURCC), 4, - "Invalid fourcc len, should be 4") + "Invalid fourcc len, should be 4") def _IsUniqueSequence(self, sequence): seen = set() diff --git a/tools/burnin b/tools/burnin index c442957..f0e4113 100755 --- a/tools/burnin +++ b/tools/burnin @@ -83,9 +83,12 @@ def Err(msg, exit_code=1): class SimpleOpener(urllib.FancyURLopener): """A simple url opener""" + # pylint: disable-msg=W0221 - def prompt_user_passwd(self, host, realm, clear_cache = 0): + def prompt_user_passwd(self, host, 
     """No-interaction version of prompt_user_passwd."""
+    # we follow parent class' API
+    # pylint: disable-msg=W0613
     return None, None
 
   def http_error_default(self, url, fp, errcode, errmsg, headers):
@@ -115,6 +118,8 @@ OPTIONS = [
                  completion_suggest=("128M 256M 512M 1G 4G 8G"
                                      " 12G 16G").split()),
   cli.VERBOSE_OPT,
+  cli.NOIPCHECK_OPT,
+  cli.NONAMECHECK_OPT,
   cli.cli_option("--no-replace1", dest="do_replace1",
                  help="Skip disk replacement with the same secondary",
                  action="store_false", default=True),
@@ -199,6 +204,39 @@ OPTIONS = [
 
 ARGUMENTS = [cli.ArgInstance(min=1)]
 
+
+def _DoCheckInstances(fn):
+  """Decorator for checking instances.
+
+  """
+  def wrapper(self, *args, **kwargs):
+    val = fn(self, *args, **kwargs)
+    for instance in self.instances:
+      self._CheckInstanceAlive(instance) # pylint: disable-msg=W0212
+    return val
+
+  return wrapper
+
+
+def _DoBatch(retry):
+  """Decorator for possible batch operations.
+
+  Must come after the _DoCheckInstances decorator (if any).
+
+  @param retry: whether this is a retryable batch, will be
+      passed to StartBatch
+
+  """
+  def wrap(fn):
+    def batched(self, *args, **kwargs):
+      self.StartBatch(retry)
+      val = fn(self, *args, **kwargs)
+      self.CommitQueue()
+      return val
+    return batched
+
+  return wrap
+
+
 class Burner(object):
   """Burner class."""
 
@@ -254,7 +292,7 @@ class Burner(object):
         Log("Idempotent %s succeeded after %d retries" %
             (msg, MAX_RETRIES - retry_count))
         return val
-      except Exception, err:
+      except Exception, err: # pylint: disable-msg=W0703
         if retry_count == 0:
           Log("Non-idempotent %s failed, aborting" % (msg, ))
           raise
@@ -342,43 +380,12 @@ class Burner(object):
       Log("waiting for job %s for %s" % (jid, iname), indent=2)
       try:
         results.append(cli.PollJob(jid, cl=self.cl, feedback_fn=self.Feedback))
-      except Exception, err:
+      except Exception, err: # pylint: disable-msg=W0703
         Log("Job for %s failed: %s" % (iname, err))
     if len(results) != len(jobs):
       raise BurninFailure()
     return results
 
-  def _DoCheckInstances(fn):
-    """Decorator for checking instances.
-
-    """
-    def wrapper(self, *args, **kwargs):
-      val = fn(self, *args, **kwargs)
-      for instance in self.instances:
-        self._CheckInstanceAlive(instance)
-      return val
-
-    return wrapper
-
-  def _DoBatch(retry):
-    """Decorator for possible batch operations.
-
-    Must come after the _DoCheckInstances decorator (if any).
-
-    @param retry: whether this is a retryable batch, will be
-        passed to StartBatch
-
-    """
-    def wrap(fn):
-      def batched(self, *args, **kwargs):
-        self.StartBatch(retry)
-        val = fn(self, *args, **kwargs)
-        self.CommitQueue()
-        return val
-      return batched
-
-    return wrap
-
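Hoisting _DoCheckInstances and _DoBatch to module level (the hunk above
removes the old in-class copies) keeps their behaviour while making the
implicit first argument explicit to pylint. As the _DoBatch docstring
says, it must come after, i.e. below, _DoCheckInstances, so the alive
check only runs once the queued batch has been committed. A sketch of the
intended stacking (BurnExample is a made-up method name):

  class Burner(object):
    # ...

    @_DoCheckInstances   # outer: verifies instances after the batch finishes
    @_DoBatch(True)      # inner: wraps the body in StartBatch/CommitQueue
    def BurnExample(self):
      for instance in self.instances:
        self.ExecOrQueue(instance, self.StartInstanceOp(instance))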
   def ParseOptions(self):
     """Parses the command line options.
 
@@ -422,6 +429,9 @@ class Burner(object):
     if options.nodes and options.iallocator:
       Err("Give either the nodes option or the iallocator option, not both")
 
+    if options.http_check and not options.name_check:
+      Err("Can't enable HTTP checks without name checks")
+
     self.opts = options
     self.instances = args
     self.bep = {
@@ -498,7 +508,8 @@ class Burner(object):
                                     pnode=pnode,
                                     snode=snode,
                                     start=True,
-                                    ip_check=True,
+                                    ip_check=self.opts.ip_check,
+                                    name_check=self.opts.name_check,
                                     wait_for_sync=True,
                                     file_driver="loop",
                                     file_storage_dir=None,
@@ -536,7 +547,7 @@ class Burner(object):
                                 disks=[i for i in range(self.disk_count)])
       Log("run %s" % mode, indent=2)
       ops.append(op)
-    self.ExecOrQueue(instance, *ops)
+    self.ExecOrQueue(instance, *ops) # pylint: disable-msg=W0142
 
   @_DoBatch(True)
   def BurnReplaceDisks2(self):
@@ -648,7 +659,8 @@ class Burner(object):
                                       pnode=pnode,
                                       snode=snode,
                                       start=True,
-                                      ip_check=True,
+                                      ip_check=self.opts.ip_check,
+                                      name_check=self.opts.name_check,
                                       wait_for_sync=True,
                                       file_storage_dir=None,
                                       file_driver="loop",
@@ -665,15 +677,18 @@ class Burner(object):
     Log("remove export", indent=2)
     self.ExecOrQueue(instance, exp_op, rem_op, imp_op, erem_op)
 
-  def StopInstanceOp(self, instance):
+  @staticmethod
+  def StopInstanceOp(instance):
     """Stop given instance."""
     return opcodes.OpShutdownInstance(instance_name=instance)
 
-  def StartInstanceOp(self, instance):
+  @staticmethod
+  def StartInstanceOp(instance):
     """Start given instance."""
     return opcodes.OpStartupInstance(instance_name=instance, force=False)
 
-  def RenameInstanceOp(self, instance, instance_new):
+  @staticmethod
+  def RenameInstanceOp(instance, instance_new):
     """Rename instance."""
     return opcodes.OpRenameInstance(instance_name=instance,
                                     new_name=instance_new)
@@ -751,7 +766,7 @@ class Burner(object):
                                     ignore_secondaries=False)
       Log("reboot with type '%s'" % reboot_type, indent=2)
       ops.append(op)
-    self.ExecOrQueue(instance, *ops)
+    self.ExecOrQueue(instance, *ops) # pylint: disable-msg=W0142
 
   @_DoCheckInstances
   @_DoBatch(True)
@@ -904,7 +919,7 @@ class Burner(object):
     if not self.opts.keep_instances:
       try:
         self.BurnRemove()
-      except Exception, err:
+      except Exception, err: # pylint: disable-msg=W0703
         if has_err: # already detected errors, so errors in removal
                     # are quite expected
           Log("Note: error detected during instance remove: %s" % str(err))
diff --git a/tools/cfgshell b/tools/cfgshell
index 101e2ca..57ed0bd 100755
--- a/tools/cfgshell
+++ b/tools/cfgshell
@@ -53,6 +53,8 @@ class ConfigShell(cmd.Cmd):
   responsibility to know what they're doing.
""" + # all do_/complete_* functions follow the same API + # pylint: disable-msg=W0613 prompt = "(/) " def __init__(self, cfg_file=None): @@ -152,7 +154,7 @@ class ConfigShell(cmd.Cmd): arg = None try: self.cfg = config.ConfigWriter(cfg_file=arg, offline=True) - self.parents = [self.cfg._config_data] + self.parents = [self.cfg._config_data] # pylint: disable-msg=W0212 self.path = [] except errors.ConfigurationError, err: print "Error: %s" % str(err) @@ -177,7 +179,7 @@ class ConfigShell(cmd.Cmd): """ pointer = self.parents[-1] - dirs, entries = self._get_entries(pointer) + dirs, _ = self._get_entries(pointer) matches = [str(name) for name in dirs if name.startswith(text)] return matches @@ -202,7 +204,7 @@ class ConfigShell(cmd.Cmd): return False pointer = self.parents[-1] - dirs, entries = self._get_entries(pointer) + dirs, _ = self._get_entries(pointer) if line not in dirs: print "No such child" @@ -232,7 +234,7 @@ class ConfigShell(cmd.Cmd): """ pointer = self.parents[-1] - dirs, entries = self._get_entries(pointer) + _, entries = self._get_entries(pointer) matches = [name for name in entries if name.startswith(text)] return matches @@ -244,7 +246,7 @@ class ConfigShell(cmd.Cmd): """ pointer = self.parents[-1] - dirs, entries = self._get_entries(pointer) + _, entries = self._get_entries(pointer) if line not in entries: print "No such entry" return False @@ -284,7 +286,7 @@ class ConfigShell(cmd.Cmd): if self.cfg.VerifyConfig(): print "Config data does not validate, refusing to save." return False - self.cfg._WriteConfig() + self.cfg._WriteConfig() # pylint: disable-msg=W0212 def do_rm(self, line): """Removes an instance or a node. @@ -294,7 +296,7 @@ class ConfigShell(cmd.Cmd): """ pointer = self.parents[-1] - data = self.cfg._config_data + data = self.cfg._config_data # pylint: disable-msg=W0212 if pointer not in (data.instances, data.nodes): print "Can only delete instances and nodes" return False @@ -309,14 +311,16 @@ class ConfigShell(cmd.Cmd): else: print "Invalid node name" - def do_EOF(self, line): + @staticmethod + def do_EOF(line): """Exit the application. """ print return True - def do_quit(self, line): + @staticmethod + def do_quit(line): """Exit the application. """ @@ -351,7 +355,7 @@ def main(): This is just a wrapper over BootStrap, to handle our own exceptions. """ - options, args = ParseOptions() + _, args = ParseOptions() if args: cfg_file = args[0] else: diff --git a/tools/cfgupgrade b/tools/cfgupgrade index 117fc13..e7be591 100755 --- a/tools/cfgupgrade +++ b/tools/cfgupgrade @@ -32,7 +32,6 @@ import os.path import sys import optparse import logging -import errno from ganeti import constants from ganeti import serializer @@ -98,7 +97,7 @@ def main(): """Main program. 
""" - global options, args + global options, args # pylint: disable-msg=W0603 program = os.path.basename(sys.argv[0]) @@ -198,7 +197,7 @@ def main(): if vrfy: logging.error("Errors after conversion:") for item in vrfy: - logging.error(" - %s" % item) + logging.error(" - %s", item) del cfg logging.info("File loaded successfully") diff --git a/tools/lvmstrap b/tools/lvmstrap index 223f6fd..01ad670 100755 --- a/tools/lvmstrap +++ b/tools/lvmstrap @@ -130,7 +130,7 @@ def ParseOptions(): Returns: (options, args), as returned by OptionParser.parse_args """ - global verbose_flag + global verbose_flag # pylint: disable-msg=W0603 parser = optparse.OptionParser(usage="\n%s" % USAGE, version="%%prog (ganeti) %s" % @@ -192,7 +192,7 @@ def CheckPrereq(): if os.getuid() != 0: raise PrereqError("This tool runs as root only. Really.") - osname, nodename, release, version, arch = os.uname() + osname, _, release, _, _ = os.uname() if osname != 'Linux': raise PrereqError("This tool only runs on Linux" " (detected OS: %s)." % osname) @@ -271,7 +271,7 @@ def CheckSysDev(name, devnum): """ path = "/dev/%s" % name - for retries in range(40): + for _ in range(40): if os.path.exists(path): break time.sleep(0.250) @@ -426,7 +426,7 @@ def GetMountInfo(): mountlines = ReadFile("/proc/mounts").splitlines() mounts = {} for line in mountlines: - device, mountpoint, fstype, rest = line.split(None, 3) + _, mountpoint, fstype, _ = line.split(None, 3) # fs type blacklist if fstype in ["nfs", "nfs4", "autofs", "tmpfs", "proc", "sysfs"]: continue @@ -542,7 +542,7 @@ def CheckReread(name): boolean, the in-use status of the device """ - for retries in range(3): + for _ in range(3): result = ExecCommand("blockdev --rereadpt /dev/%s" % name) if not result.failed: break @@ -672,7 +672,7 @@ def ValidateDiskList(options): " non-removable block devices).") sysd_free = [] sysd_used = [] - for name, size, dev, part, used in sysdisks: + for name, _, _, _, used in sysdisks: if used: sysd_used.append(name) else: @@ -726,7 +726,7 @@ def BootStrap(): CreatePVOnDisk(disk) CreateVG(vgname, disklist) - status, lv_count, size, free = CheckVGExists(vgname) + status, lv_count, size, _ = CheckVGExists(vgname) if status: print "Done! %s: size %s GiB, disks: %s" % (vgname, size, ",".join(disklist))