Revision 7c4d6c7b

b/lib/backend.py
@@ -141,7 +141,7 @@
     master_netdev = cfg.GetMasterNetdev()
     master_ip = cfg.GetMasterIP()
     master_node = cfg.GetMasterNode()
-  except errors.ConfigurationError, err:
+  except errors.ConfigurationError:
     logging.exception("Cluster configuration incomplete")
     return (None, None, None)
   return (master_netdev, master_ip, master_node)
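
Dropping the ", err" binding here and in the hunks below is safe: logging.exception() reads the active exception and its traceback from the interpreter state, so the except clause never needs to name an object it does not use. A minimal standalone sketch of the pattern (illustrative function and exception type, not code from the tree):

    import logging

    def get_master_info(cfg):
      # The except clause binds no name; logging.exception() still
      # records the full traceback of the active exception.
      try:
        return (cfg.GetMasterNetdev(), cfg.GetMasterIP(), cfg.GetMasterNode())
      except AttributeError:
        logging.exception("Cluster configuration incomplete")
        return (None, None, None)
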
@@ -573,7 +573,7 @@
     try:
       names = hypervisor.GetHypervisor(hname).ListInstances()
       results.extend(names)
-    except errors.HypervisorError, err:
+    except errors.HypervisorError:
       logging.exception("Error enumerating instances for hypevisor %s", hname)
       raise
 
@@ -918,7 +918,7 @@
   # test every 10secs for 2min
 
   time.sleep(1)
-  for dummy in range(11):
+  for _ in range(11):
     if instance.name not in GetInstanceList([hv_name]):
       break
     time.sleep(10)
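
Renaming dummy to _ marks the loop counter as intentionally unused, the conventional Python spelling. The surrounding code is a bounded poll, roughly of this shape (helper names here are illustrative, not the real functions):

    import time

    def wait_until_gone(name, list_fn, attempts=11, interval=10):
      # Poll list_fn() up to `attempts` times, every `interval` seconds,
      # until `name` disappears; the counter itself is never read.
      for _ in range(attempts):
        if name not in list_fn():
          return True
        time.sleep(interval)
      return False
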
@@ -2024,7 +2024,7 @@
         # but we don't have the owner here - maybe parse from existing
         # cache? for now, we only lose lvm data when we rename, which
         # is less critical than DRBD or MD
-    except errors.BlockDeviceError, err:
+    except errors.BlockDeviceError:
       logging.exception("Can't rename device '%s' to '%s'", dev, unique_id)
       result = False
   return result
@@ -2109,7 +2109,7 @@
       # deletes dir only if empty, otherwise we want to return False
       try:
         os.rmdir(file_storage_dir)
-      except OSError, err:
+      except OSError:
         logging.exception("Cannot remove file storage directory '%s'",
                           file_storage_dir)
         result = False,
@@ -2138,7 +2138,7 @@
       if os.path.isdir(old_file_storage_dir):
         try:
           os.rename(old_file_storage_dir, new_file_storage_dir)
-        except OSError, err:
+        except OSError:
           logging.exception("Cannot rename '%s' to '%s'",
                             old_file_storage_dir, new_file_storage_dir)
           result =  False,
@@ -2544,7 +2544,7 @@
     dir_name = "%s/%s" % (self._BASE_DIR, subdir)
     try:
       dir_contents = utils.ListVisibleFiles(dir_name)
-    except OSError, err:
+    except OSError:
       # FIXME: must log output in case of failures
       return rr
 
@@ -2667,7 +2667,7 @@
     fdata = "%s %s %s\n" % (str(owner), state, iv_name)
     try:
       utils.WriteFile(fpath, data=fdata)
-    except EnvironmentError, err:
+    except EnvironmentError:
       logging.exception("Can't update bdev cache for %s", dev_path)
 
   @classmethod
@@ -2689,5 +2689,5 @@
     fpath = cls._ConvertPath(dev_path)
     try:
       utils.RemoveFile(fpath)
-    except EnvironmentError, err:
+    except EnvironmentError:
       logging.exception("Can't update bdev cache for %s", dev_path)

b/lib/cmdlib.py
@@ -26,12 +26,10 @@
 import os
 import os.path
 import time
-import tempfile
 import re
 import platform
 import logging
 import copy
-import random
 
 from ganeti import ssh
 from ganeti import utils
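
The dropped imports (tempfile and random above, opcodes below) are no longer referenced anywhere in cmdlib.py; pylint reports such cases as unused-import (W0611). A file like this illustrative one would be flagged:

    # demo.py -- pylint reports roughly: W0611: Unused import tempfile
    import os
    import tempfile

    def list_cwd():
      return os.listdir(".")
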
@@ -40,7 +38,6 @@
 from ganeti import locking
 from ganeti import constants
 from ganeti import objects
-from ganeti import opcodes
 from ganeti import serializer
 from ganeti import ssconf
 
@@ -454,7 +451,7 @@
 
 def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
                           memory, vcpus, nics, disk_template, disks,
-                          bep, hvp, hypervisor):
+                          bep, hvp, hypervisor_name):
   """Builds instance related env variables for hooks
 
   This builds the hook environment from individual variables.
@@ -484,8 +481,8 @@
   @param bep: the backend parameters for the instance
   @type hvp: dict
   @param hvp: the hypervisor parameters for the instance
-  @type hypervisor: string
-  @param hypervisor: the hypervisor for the instance
+  @type hypervisor_name: string
+  @param hypervisor_name: the hypervisor for the instance
   @rtype: dict
   @return: the hook environment for this instance
 
@@ -504,7 +501,7 @@
     "INSTANCE_MEMORY": memory,
     "INSTANCE_VCPUS": vcpus,
     "INSTANCE_DISK_TEMPLATE": disk_template,
-    "INSTANCE_HYPERVISOR": hypervisor,
+    "INSTANCE_HYPERVISOR": hypervisor_name,
   }
 
   if nics:
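
The hypervisor -> hypervisor_name rename is more than cosmetic: cmdlib.py does `from ganeti import hypervisor`, so a parameter of the same name shadows the module inside the function body. A stripped-down illustration of the hazard, using the standard os module as the stand-in:

    import os

    def fragile(os):
      # The parameter shadows the `os` module: any os.* reference in
      # this body now hits the argument, and the module is unreachable.
      return str(os)

    def robust(os_name):
      # With a distinct parameter name the module stays visible.
      return os.path.join("/instances", os_name)
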
@@ -1293,7 +1290,6 @@
 
     node_lvs = self.rpc.call_volume_list(nodes, vg_name)
 
-    to_act = set()
     for node in nodes:
       # node_volume
       lvs = node_lvs[node]
@@ -2513,8 +2509,8 @@
       "master": cluster.master_node,
       "default_hypervisor": cluster.default_hypervisor,
       "enabled_hypervisors": cluster.enabled_hypervisors,
-      "hvparams": dict([(hypervisor, cluster.hvparams[hypervisor])
-                        for hypervisor in cluster.enabled_hypervisors]),
+      "hvparams": dict([(hypervisor_name, cluster.hvparams[hypervisor_name])
+                        for hypervisor_name in cluster.enabled_hypervisors]),
       "beparams": cluster.beparams,
       "candidate_pool_size": cluster.candidate_pool_size,
       "default_bridge": cluster.default_bridge,
@@ -2677,7 +2673,7 @@
   """Start the disks of an instance.
 
   """
-  disks_ok, dummy = _AssembleInstanceDisks(lu, instance,
+  disks_ok, _ = _AssembleInstanceDisks(lu, instance,
                                            ignore_secondaries=force)
   if not disks_ok:
     _ShutdownInstanceDisks(lu, instance)
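
_AssembleInstanceDisks returns a pair of which only the boolean matters at these call sites; unpacking the remainder into _ documents that. A sketch (the return shape is inferred from the call site, not taken from the real helper):

    def assemble_disks():
      # Hypothetical helper: a success flag plus details the caller ignores.
      return True, ["/dev/drbd0"]

    disks_ok, _ = assemble_disks()
    if not disks_ok:
      raise RuntimeError("disk activation failed")
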
@@ -3675,7 +3671,7 @@
       logging.info("Starting instance %s on node %s",
                    instance.name, target_node)
 
-      disks_ok, dummy = _AssembleInstanceDisks(self, instance,
+      disks_ok, _ = _AssembleInstanceDisks(self, instance,
                                                ignore_secondaries=True)
       if not disks_ok:
         _ShutdownInstanceDisks(self, instance)
@@ -5431,7 +5427,6 @@
     logging.debug("Allocated minors %s" % (minors,))
     self.proc.LogStep(4, steps_total, "changing drbd configuration")
     for idx, (dev, new_minor) in enumerate(zip(instance.disks, minors)):
-      size = dev.size
       info("activating a new drbd on %s for disk/%d" % (new_node, idx))
       # create new devices on new_node; note that we create two IDs:
       # one without port, so the drbd will be activated without
@@ -5966,7 +5961,7 @@
     This only checks the instance list against the existing names.
 
     """
-    force = self.force = self.op.force
+    self.force = self.op.force
 
     # checking the new params on the primary/secondary nodes
 
@@ -6942,7 +6937,6 @@
     """
     if call_fn is None:
       call_fn = self.lu.rpc.call_iallocator_runner
-    data = self.in_text
 
     result = call_fn(self.lu.cfg.GetMasterNode(), name, self.in_text)
     result.Raise()

b/lib/config.py
@@ -474,8 +474,8 @@
     def _AppendUsedPorts(instance_name, disk, used):
       duplicates = []
       if disk.dev_type == constants.LD_DRBD8 and len(disk.logical_id) >= 5:
-        nodeA, nodeB, dummy, minorA, minorB = disk.logical_id[:5]
-        for node, port in ((nodeA, minorA), (nodeB, minorB)):
+        node_a, node_b, _, minor_a, minor_b = disk.logical_id[:5]
+        for node, port in ((node_a, minor_a), (node_b, minor_b)):
           assert node in used, ("Node '%s' of instance '%s' not found"
                                 " in node list" % (node, instance_name))
           if port in used[node]:

b/lib/http/__init__.py
@@ -367,15 +367,12 @@
   # TODO: event_poll/event_check/override
   if op in (SOCKOP_SEND, SOCKOP_HANDSHAKE):
     event_poll = select.POLLOUT
-    event_check = select.POLLOUT
 
   elif op == SOCKOP_RECV:
     event_poll = select.POLLIN
-    event_check = select.POLLIN | select.POLLPRI
 
   elif op == SOCKOP_SHUTDOWN:
     event_poll = None
-    event_check = None
 
     # The timeout is only used when OpenSSL requests polling for a condition.
     # It is not advisable to have no timeout for shutdown.
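
With the unused event_check assignments gone, only event_poll survives: POLLOUT to wait for writability before a send or handshake, POLLIN for readability before a receive, and None for shutdown. A generic example of waiting on one socket with select.poll (not the Ganeti wrapper itself):

    import select

    def wait_readable(sock, timeout_ms=1000):
      # Block until the socket is readable or the timeout expires;
      # poll() returns a list of (fd, event) pairs, empty on timeout.
      poller = select.poll()
      poller.register(sock.fileno(), select.POLLIN)
      return bool(poller.poll(timeout_ms))
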
b/lib/http/auth.py
@@ -23,12 +23,10 @@
 """
 
 import logging
-import time
 import re
 import base64
 import binascii
 
-from ganeti import constants
 from ganeti import utils
 from ganeti import http
 
b/lib/http/client.py
@@ -22,23 +22,13 @@
 
 """
 
-import BaseHTTPServer
-import cgi
-import logging
-import OpenSSL
 import os
 import select
 import socket
-import sys
-import time
-import signal
 import errno
 import threading
 
-from ganeti import constants
-from ganeti import serializer
 from ganeti import workerpool
-from ganeti import utils
 from ganeti import http
 
 
b/lib/http/server.py
@@ -31,9 +31,6 @@
 import time
 import signal
 
-from ganeti import constants
-from ganeti import serializer
-from ganeti import utils
 from ganeti import http
 
 
@@ -498,7 +495,7 @@
           # As soon as too many children run, we'll not respond to new
           # requests. The real solution would be to add a timeout for children
           # and killing them after some time.
-          pid, status = os.waitpid(0, 0)
+          pid, _ = os.waitpid(0, 0)
         except os.error:
           pid = None
         if pid and pid in self._children:
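
os.waitpid(0, 0) blocks until any child of the process exits and returns a (pid, status) pair; the forking server only needs the pid to prune its bookkeeping, hence the _ for the status. A reduced sketch of such a reaper (assuming a `children` set of pids):

    import os

    def reap_one(children):
      # Wait for some child to exit and drop it from the set; the
      # exit status is irrelevant here, so it is unpacked into `_`.
      try:
        pid, _ = os.waitpid(0, 0)
      except OSError:
        return None
      children.discard(pid)
      return pid
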
b/lib/hypervisor/hv_fake.py
@@ -25,7 +25,6 @@
 
 import os
 import os.path
-import re
 
 from ganeti import utils
 from ganeti import constants

b/lib/hypervisor/hv_xen.py
@@ -89,7 +89,7 @@
     @return: list of (name, id, memory, vcpus, state, time spent)
 
     """
-    for dummy in range(5):
+    for _ in range(5):
      result = utils.RunCmd(["xm", "list"])
       if not result.failed:
         break

b/lib/jstore.py
@@ -22,9 +22,7 @@
 """Module implementing the job queue handling."""
 
 import os
-import logging
 import errno
-import re
 
 from ganeti import constants
 from ganeti import errors

b/lib/luxi.py
@@ -284,7 +284,7 @@
       old_transp = self.transport
       self.transport = None
       old_transp.Close()
-    except Exception, err:
+    except Exception:
       pass
 
   def CallMethod(self, method, args):

b/lib/mcpu.py
@@ -361,4 +361,4 @@
     phase = constants.HOOKS_PHASE_POST
     hpath = constants.HOOKS_NAME_CFGUPDATE
     nodes = [self.lu.cfg.GetMasterNode()]
-    results = self._RunWrapper(nodes, hpath, phase)
+    self._RunWrapper(nodes, hpath, phase)

b/lib/objects.py
@@ -504,10 +504,10 @@
     """Checks that this disk is correctly configured.
 
     """
-    errors = []
+    all_errors = []
     if self.mode not in constants.DISK_ACCESS_SET:
-      errors.append("Disk access mode '%s' is invalid" % (self.mode, ))
-    return errors
+      all_errors.append("Disk access mode '%s' is invalid" % (self.mode, ))
+    return all_errors
 
 
 class Instance(TaggableObject):
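
objects.py imports the ganeti.errors module, so a local list named errors would shadow it for the rest of the method; hence all_errors. The method's shape, reduced to a standalone sketch (the constant is an illustrative stand-in):

    DISK_ACCESS_SET = frozenset(["ro", "rw"])  # stand-in, not the real constant

    def verify_mode(mode):
      # Accumulate readable problem descriptions instead of raising,
      # so a caller can report every issue in one pass.
      all_errors = []
      if mode not in DISK_ACCESS_SET:
        all_errors.append("Disk access mode '%s' is invalid" % (mode,))
      return all_errors
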
b/lib/rapi/baserlib.py
@@ -25,8 +25,6 @@
 
 import logging
 
-import ganeti.cli
-
 from ganeti import luxi
 from ganeti import rapi
 from ganeti import http
@@ -247,7 +245,7 @@
         val = 0
     try:
       val = int(val)
-    except (ValueError, TypeError), err:
+    except (ValueError, TypeError):
       raise http.HttpBadRequest("Invalid value for the"
                                 " '%s' parameter" % (name,))
     return val

b/lib/rpc.py
@@ -31,7 +31,6 @@
 # R0904: Too many public methods
 
 import os
-import socket
 import logging
 import zlib
 import base64

b/lib/utils.py
@@ -27,7 +27,6 @@
 """
 
 
-import sys
 import os
 import time
 import subprocess
@@ -59,7 +58,6 @@
 _locksheld = []
 _re_shell_unquoted = re.compile('^[-.,=:/_+@A-Za-z0-9]+$')
 
-debug = False
 debug_locks = False
 
 #: when set to True, L{RunCmd} is disabled
@@ -687,7 +685,7 @@
   """
   try:
     nv = fn(val)
-  except (ValueError, TypeError), err:
+  except (ValueError, TypeError):
     nv = val
   return nv
 
@@ -1097,7 +1095,7 @@
   if source is not None:
     try:
       sock.bind((source, 0))
-    except socket.error, (errcode, errstring):
+    except socket.error, (errcode, _):
       if errcode == errno.EADDRNOTAVAIL:
         success = False
 
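
except socket.error, (errcode, _): destructures the exception's args tuple directly in the except clause, a Python 2-only form; only the error code is needed, so the message string becomes _. The same check without destructuring, which is also a step closer to the later `except ... as` spelling, might look like:

    import errno
    import socket

    def can_bind(source_ip):
      sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
      try:
        sock.bind((source_ip, 0))
      except socket.error, err:
        # err.args[0] carries the error code the tuple form unpacked;
        # on Python 2.6+ this is also available as err.errno
        if err.args[0] == errno.EADDRNOTAVAIL:
          return False
        raise
      return True
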

  
@@ -1360,14 +1358,14 @@
 
 def all(seq, pred=bool):
   "Returns True if pred(x) is True for every element in the iterable"
-  for elem in itertools.ifilterfalse(pred, seq):
+  for _ in itertools.ifilterfalse(pred, seq):
     return False
   return True
 
 
 def any(seq, pred=bool):
   "Returns True if pred(x) is True for at least one element in the iterable"
-  for elem in itertools.ifilter(pred, seq):
+  for _ in itertools.ifilter(pred, seq):
     return True
   return False
 
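
These functions are backports of the all()/any() builtins (added in Python 2.5) extended with a pred argument; itertools.ifilterfalse(pred, seq) yields the elements failing the predicate, so producing even a single element disproves "all". Usage, with the backport restated to keep the example self-contained:

    import itertools

    def all(seq, pred=bool):  # same shape as the utils.py version above
      # One element failing pred is enough to return False.
      for _ in itertools.ifilterfalse(pred, seq):
        return False
      return True

    assert all([2, 4, 6], pred=lambda x: x % 2 == 0)
    assert not all([2, 5, 6], pred=lambda x: x % 2 == 0)
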

  
@@ -1552,7 +1550,6 @@
   @param name: the daemon name used to derive the pidfile name
 
   """
-  pid = os.getpid()
   pidfilename = DaemonPidFileName(name)
   # TODO: we could check here that the file contains our pid
   try:
