Revision 5ae4945a

b/Makefile.am
1308 1308
	HBINARY="./htools/hpc-htools" ./htools/offline-test.sh
1309 1309

  
1310 1310
# E111: indentation is not a multiple of four
1311
# E121: continuation line indentation is not a multiple of four
1312
#       (since our indent level is not 4)
1313
# E125: continuation line does not distinguish itself from next logical line
1314
#       (since our indent level is not 4)
1315
# E127: continuation line over-indented for visual indent
1316
#       (since our indent level is not 4)
1317
# note: do NOT add E128 here; it's a valid style error in most cases!
1318
# I've seen real errors, but also some cases where we indent wrongly
1319
# due to line length; try to rework the cases where it is triggered,
1320
# instead of silencing it
1311 1321
# E261: at least two spaces before inline comment
1312 1322
# E501: line too long (80 characters)
1313
PEP8_IGNORE = E111,E261,E501
1323
PEP8_IGNORE = E111,E121,E125,E127,E261,E501
1314 1324

  
1315 1325
# For excluding pep8 expects filenames only, not whole paths
1316 1326
PEP8_EXCLUDE = $(subst $(space),$(comma),$(strip $(notdir $(BUILT_PYTHON_SOURCES))))
b/autotools/build-bash-completion
1 1
#!/usr/bin/python
2 2
#
3 3

  
4
# Copyright (C) 2009 Google Inc.
4
# Copyright (C) 2009, 2012 Google Inc.
5 5
#
6 6
# This program is free software; you can redistribute it and/or modify
7 7
# it under the terms of the GNU General Public License as published by
......
108 108
  sw.Write("}")
109 109

  
110 110
  for (fnname, paths) in [
111
      ("os", constants.OS_SEARCH_PATH),
112
      ("iallocator", constants.IALLOCATOR_SEARCH_PATH),
113
      ]:
111
    ("os", constants.OS_SEARCH_PATH),
112
    ("iallocator", constants.IALLOCATOR_SEARCH_PATH),
113
    ]:
114 114
    sw.Write("_ganeti_%s() {", fnname)
115 115
    sw.IncIndent()
116 116
    try:
b/doc/devnotes.rst
35 35
The same with pep8, other versions may give you errors::
36 36

  
37 37
     $ pep8 --version
38
     0.6.1
38
     1.2
39 39

  
40 40
To generate unittest coverage reports (``make coverage``), `coverage
41 41
<http://pypi.python.org/pypi/coverage>`_ needs to be installed.
......
49 49
               logilab-astng==0.20.1 \
50 50
               logilab-common==0.50.3 \
51 51
               pylint==0.21.1 \
52
               pep8==0.6.1 \
52
               pep8==1.2 \
53 53
               coverage
54 54

  
55 55
For Haskell development, again all things from the quick install
b/lib/backend.py
252 252
  except errors.ConfigurationError, err:
253 253
    _Fail("Cluster configuration incomplete: %s", err, exc=True)
254 254
  return (master_netdev, master_ip, master_node, primary_ip_family,
255
      master_netmask)
255
          master_netmask)
256 256

  
257 257

  
258 258
def RunLocalHooks(hook_opcode, hooks_path, env_builder_fn):
......
698 698
    else:
699 699
      source = None
700 700
    result[constants.NV_MASTERIP] = netutils.TcpPing(master_ip, port,
701
                                                  source=source)
701
                                                     source=source)
702 702

  
703 703
  if constants.NV_USERSCRIPTS in what:
704 704
    result[constants.NV_USERSCRIPTS] = \
......
3602 3602

  
3603 3603
    runparts_results = utils.RunParts(dir_name, env=env, reset_env=True)
3604 3604

  
3605
    for (relname, relstatus, runresult)  in runparts_results:
3605
    for (relname, relstatus, runresult) in runparts_results:
3606 3606
      if relstatus == constants.RUNPARTS_SKIP:
3607 3607
        rrval = constants.HKR_SKIP
3608 3608
        output = ""
b/lib/bdev.py
990 990
                                    first_line)
991 991

  
992 992
    values = version.groups()
993
    retval = {"k_major": int(values[0]),
994
              "k_minor": int(values[1]),
995
              "k_point": int(values[2]),
996
              "api": int(values[3]),
997
              "proto": int(values[4]),
998
             }
993
    retval = {
994
      "k_major": int(values[0]),
995
      "k_minor": int(values[1]),
996
      "k_point": int(values[2]),
997
      "api": int(values[3]),
998
      "proto": int(values[4]),
999
      }
999 1000
    if values[5] is not None:
1000 1001
      retval["proto2"] = values[5]
1001 1002

  
......
1393 1394

  
1394 1395
  @classmethod
1395 1396
  def _ComputeDiskBarrierArgs(cls, vmaj, vmin, vrel, disabled_barriers,
1396
      disable_meta_flush):
1397
                              disable_meta_flush):
1397 1398
    """Compute the DRBD command line parameters for disk barriers
1398 1399

  
1399 1400
    Returns a list of the disk barrier parameters as requested via the
......
1627 1628
                   "--c-delay-target", params[constants.LDP_DELAY_TARGET],
1628 1629
                   "--c-max-rate", params[constants.LDP_MAX_RATE],
1629 1630
                   "--c-min-rate", params[constants.LDP_MIN_RATE],
1630
                  ])
1631
                   ])
1631 1632

  
1632 1633
    else:
1633 1634
      args.extend(["-r", "%d" % params[constants.LDP_RESYNC_RATE]])
b/lib/bootstrap.py
779 779
  msg = result.fail_msg
780 780
  if msg:
781 781
    logging.error("Could not disable the master role on the old master"
782
                 " %s, please disable manually: %s", old_master, msg)
782
                  " %s, please disable manually: %s", old_master, msg)
783 783

  
784 784
  logging.info("Checking master IP non-reachability...")
785 785

  
b/lib/cli.py
788 788
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
789 789

  
790 790
DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
791
                            metavar="<NAME>",
792
                            help="Set the default instance allocator plugin",
793
                            default=None, type="string",
794
                            completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
791
                                    metavar="<NAME>",
792
                                    help="Set the default instance"
793
                                    " allocator plugin",
794
                                    default=None, type="string",
795
                                    completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
795 796

  
796 797
OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
797 798
                    metavar="<os>",
798 799
                    completion_suggest=OPT_COMPL_ONE_OS)
799 800

  
800 801
OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
801
                         type="keyval", default={},
802
                         help="OS parameters")
802
                          type="keyval", default={},
803
                          help="OS parameters")
803 804

  
804 805
FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
805 806
                               action="store_true", default=False,
......
848 849
SPECS_DISK_SIZE_OPT = cli_option("--specs-disk-size", dest="ispecs_disk_size",
849 850
                                 type="keyval", default={},
850 851
                                 help="Disk size specs: list of key=value,"
851
                                " where key is one of min, max, std"
852
                                 " where key is one of min, max, std"
852 853
                                 " (in MB or using a unit)")
853 854

  
854 855
SPECS_NIC_COUNT_OPT = cli_option("--specs-nic-count", dest="ispecs_nic_count",
......
857 858
                                 " where key is one of min, max, std")
858 859

  
859 860
IPOLICY_DISK_TEMPLATES = cli_option("--ipolicy-disk-templates",
860
                                 dest="ipolicy_disk_templates",
861
                                 type="list", default=None,
862
                                 help="Comma-separated list of"
863
                                 " enabled disk templates")
861
                                    dest="ipolicy_disk_templates",
862
                                    type="list", default=None,
863
                                    help="Comma-separated list of"
864
                                    " enabled disk templates")
864 865

  
865 866
IPOLICY_VCPU_RATIO = cli_option("--ipolicy-vcpu-ratio",
866 867
                                 dest="ipolicy_vcpu_ratio",
......
1087 1088
                               " (excluded from allocation operations)"))
1088 1089

  
1089 1090
CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
1090
                    type="bool", default=None, metavar=_YORNO,
1091
                    help="Set the master_capable flag on the node")
1091
                              type="bool", default=None, metavar=_YORNO,
1092
                              help="Set the master_capable flag on the node")
1092 1093

  
1093 1094
CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
1094
                    type="bool", default=None, metavar=_YORNO,
1095
                    help="Set the vm_capable flag on the node")
1095
                          type="bool", default=None, metavar=_YORNO,
1096
                          help="Set the vm_capable flag on the node")
1096 1097

  
1097 1098
ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
1098 1099
                             type="bool", default=None, metavar=_YORNO,
......
1149 1150
                                default=None)
1150 1151

  
1151 1152
USE_EXTERNAL_MIP_SCRIPT = cli_option("--use-external-mip-script",
1152
                                dest="use_external_mip_script",
1153
                                help="Specify whether to run a user-provided"
1154
                                " script for the master IP address turnup and"
1155
                                " turndown operations",
1156
                                type="bool", metavar=_YORNO, default=None)
1153
                                     dest="use_external_mip_script",
1154
                                     help="Specify whether to run a"
1155
                                     " user-provided script for the master"
1156
                                     " IP address turnup and"
1157
                                     " turndown operations",
1158
                                     type="bool", metavar=_YORNO, default=None)
1157 1159

  
1158 1160
GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
1159 1161
                                help="Specify the default directory (cluster-"
......
1162 1164
                                metavar="DIR",
1163 1165
                                default=constants.DEFAULT_FILE_STORAGE_DIR)
1164 1166

  
1165
GLOBAL_SHARED_FILEDIR_OPT = cli_option("--shared-file-storage-dir",
1166
                            dest="shared_file_storage_dir",
1167
                            help="Specify the default directory (cluster-"
1168
                            "wide) for storing the shared file-based"
1169
                            " disks [%s]" %
1170
                            constants.DEFAULT_SHARED_FILE_STORAGE_DIR,
1171
                            metavar="SHAREDDIR",
1172
                            default=constants.DEFAULT_SHARED_FILE_STORAGE_DIR)
1167
GLOBAL_SHARED_FILEDIR_OPT = cli_option(
1168
  "--shared-file-storage-dir",
1169
  dest="shared_file_storage_dir",
1170
  help="Specify the default directory (cluster-wide) for storing the"
1171
  " shared file-based disks [%s]" %
1172
  constants.DEFAULT_SHARED_FILE_STORAGE_DIR,
1173
  metavar="SHAREDDIR", default=constants.DEFAULT_SHARED_FILE_STORAGE_DIR)
1173 1174

  
1174 1175
NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
1175 1176
                                   help="Don't modify /etc/hosts",
......
1207 1208
                         help="Maximum time to wait")
1208 1209

  
1209 1210
SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
1210
                         dest="shutdown_timeout", type="int",
1211
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1212
                         help="Maximum time to wait for instance shutdown")
1211
                                  dest="shutdown_timeout", type="int",
1212
                                  default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1213
                                  help="Maximum time to wait for instance"
1214
                                  " shutdown")
1213 1215

  
1214 1216
INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
1215 1217
                          default=None,
......
1237 1239
                                     " certificate"))
1238 1240

  
1239 1241
SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert",
1240
                           default=None,
1241
                           help="File containing new SPICE certificate")
1242
                            default=None,
1243
                            help="File containing new SPICE certificate")
1242 1244

  
1243 1245
SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert",
1244
                           default=None,
1245
                           help="File containing the certificate of the CA"
1246
                                " which signed the SPICE certificate")
1246
                              default=None,
1247
                              help="File containing the certificate of the CA"
1248
                              " which signed the SPICE certificate")
1247 1249

  
1248 1250
NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate",
1249
                               dest="new_spice_cert", default=None,
1250
                               action="store_true",
1251
                               help=("Generate a new self-signed SPICE"
1252
                                     " certificate"))
1251
                                dest="new_spice_cert", default=None,
1252
                                action="store_true",
1253
                                help=("Generate a new self-signed SPICE"
1254
                                      " certificate"))
1253 1255

  
1254 1256
NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
1255 1257
                                    dest="new_confd_hmac_key",
......
1307 1309
                                   " removed from the user-id pool"))
1308 1310

  
1309 1311
RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
1310
                             action="store", dest="reserved_lvs",
1311
                             help=("A comma-separated list of reserved"
1312
                                   " logical volumes names, that will be"
1313
                                   " ignored by cluster verify"))
1312
                              action="store", dest="reserved_lvs",
1313
                              help=("A comma-separated list of reserved"
1314
                                    " logical volumes names, that will be"
1315
                                    " ignored by cluster verify"))
1314 1316

  
1315 1317
ROMAN_OPT = cli_option("--roman",
1316 1318
                       dest="roman_integers", default=False,
......
1365 1367
                              help="Specify if the SoR for node is powered")
1366 1368

  
1367 1369
OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
1368
                         default=constants.OOB_TIMEOUT,
1369
                         help="Maximum time to wait for out-of-band helper")
1370
                             default=constants.OOB_TIMEOUT,
1371
                             help="Maximum time to wait for out-of-band helper")
1370 1372

  
1371 1373
POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
1372 1374
                             default=constants.OOB_POWER_DELAY,
......
2143 2145
  elif isinstance(err, errors.OpPrereqError):
2144 2146
    if len(err.args) == 2:
2145 2147
      obuf.write("Failure: prerequisites not met for this"
2146
               " operation:\nerror type: %s, error details:\n%s" %
2148
                 " operation:\nerror type: %s, error details:\n%s" %
2147 2149
                 (err.args[1], err.args[0]))
2148 2150
    else:
2149 2151
      obuf.write("Failure: prerequisites not met for this"
b/lib/client/gnt_cluster.py
49 49
                    help="Recover from an EPO")
50 50

  
51 51
GROUPS_OPT = cli_option("--groups", default=False,
52
                    action="store_true", dest="groups",
53
                    help="Arguments are node groups instead of nodes")
52
                        action="store_true", dest="groups",
53
                        help="Arguments are node groups instead of nodes")
54 54

  
55 55
SHOW_MACHINE_OPT = cli_option("-M", "--show-machine-names", default=False,
56 56
                              action="store_true",
......
787 787
  return pem
788 788

  
789 789

  
790
def _RenewCrypto(new_cluster_cert, new_rapi_cert, #pylint: disable=R0911
790
def _RenewCrypto(new_cluster_cert, new_rapi_cert, # pylint: disable=R0911
791 791
                 rapi_cert_filename, new_spice_cert, spice_cert_filename,
792 792
                 spice_cacert_filename, new_confd_hmac_key, new_cds,
793 793
                 cds_filename, force):
b/lib/client/gnt_instance.py
116 116
      if not names:
117 117
        raise errors.OpPrereqError("No node names passed", errors.ECODE_INVAL)
118 118
      ndata = client.QueryNodes(names, ["name", "pinst_list", "sinst_list"],
119
                              False)
119
                                False)
120 120

  
121 121
    ipri = [row[1] for row in ndata]
122 122
    pri_names = list(itertools.chain(*ipri))
b/lib/client/gnt_job.py
92 92
    "summary": (lambda value: ",".join(str(item) for item in value), False),
93 93
    }
94 94
  fmtoverride.update(dict.fromkeys(["opstart", "opexec", "opend"],
95
    (lambda value: map(FormatTimestamp, value), None)))
95
                                   (lambda value: map(FormatTimestamp, value),
96
                                    None)))
96 97

  
97 98
  qfilter = qlang.MakeSimpleFilter("status", opts.status_filter)
98 99

  
b/lib/cmdlib.py
1208 1208
                     disk_sizes, spindle_use)
1209 1209

  
1210 1210

  
1211
def _ComputeIPolicyInstanceSpecViolation(ipolicy, instance_spec,
1212
    _compute_fn=_ComputeIPolicySpecViolation):
1211
def _ComputeIPolicyInstanceSpecViolation(
1212
  ipolicy, instance_spec, _compute_fn=_ComputeIPolicySpecViolation):
1213 1213
  """Compute if instance specs meets the specs of ipolicy.
1214 1214

  
1215 1215
  @type ipolicy: dict
......
1920 1920
      # Always depend on global verification
1921 1921
      depends_fn = lambda: [(-len(jobs), [])]
1922 1922

  
1923
    jobs.extend([opcodes.OpClusterVerifyGroup(group_name=group,
1924
                                            ignore_errors=self.op.ignore_errors,
1925
                                            depends=depends_fn())]
1926
                for group in groups)
1923
    jobs.extend(
1924
      [opcodes.OpClusterVerifyGroup(group_name=group,
1925
                                    ignore_errors=self.op.ignore_errors,
1926
                                    depends=depends_fn())]
1927
      for group in groups)
1927 1928

  
1928 1929
    # Fix up all parameters
1929 1930
    for op in itertools.chain(*jobs): # pylint: disable=W0142
......
2645 2646

  
2646 2647
    if drbd_helper:
2647 2648
      helper_result = nresult.get(constants.NV_DRBDHELPER, None)
2648
      test = (helper_result == None)
2649
      test = (helper_result is None)
2649 2650
      _ErrorIf(test, constants.CV_ENODEDRBDHELPER, node,
2650 2651
               "no drbd usermode helper returned")
2651 2652
      if helper_result:
......
3572 3573
    res_instances = set()
3573 3574
    res_missing = {}
3574 3575

  
3575
    nv_dict = _MapInstanceDisksToNodes([inst
3576
            for inst in self.instances.values()
3577
            if inst.admin_state == constants.ADMINST_UP])
3576
    nv_dict = _MapInstanceDisksToNodes(
3577
      [inst for inst in self.instances.values()
3578
       if inst.admin_state == constants.ADMINST_UP])
3578 3579

  
3579 3580
    if nv_dict:
3580 3581
      nodes = utils.NiceSort(set(self.owned_locks(locking.LEVEL_NODE)) &
......
4330 4331
    files_mc.add(constants.CLUSTER_CONF_FILE)
4331 4332

  
4332 4333
  # Files which should only be on VM-capable nodes
4333
  files_vm = set(filename
4334
  files_vm = set(
4335
    filename
4334 4336
    for hv_name in cluster.enabled_hypervisors
4335 4337
    for filename in hypervisor.GetHypervisor(hv_name).GetAncillaryFiles()[0])
4336 4338

  
4337
  files_opt |= set(filename
4339
  files_opt |= set(
4340
    filename
4338 4341
    for hv_name in cluster.enabled_hypervisors
4339 4342
    for filename in hypervisor.GetHypervisor(hv_name).GetAncillaryFiles()[1])
4340 4343

  
......
4757 4760
                    type(result.payload))
4758 4761

  
4759 4762
    if self.op.command in [
4760
        constants.OOB_POWER_ON,
4761
        constants.OOB_POWER_OFF,
4762
        constants.OOB_POWER_CYCLE,
4763
        ]:
4763
      constants.OOB_POWER_ON,
4764
      constants.OOB_POWER_OFF,
4765
      constants.OOB_POWER_CYCLE,
4766
      ]:
4764 4767
      if result.payload is not None:
4765 4768
        errs.append("%s is expected to not return payload but got '%s'" %
4766 4769
                    (self.op.command, result.payload))
......
5636 5639
    if not newbie_singlehomed:
5637 5640
      # check reachability from my secondary ip to newbie's secondary ip
5638 5641
      if not netutils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
5639
                           source=myself.secondary_ip):
5642
                              source=myself.secondary_ip):
5640 5643
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
5641 5644
                                   " based ping to node daemon port",
5642 5645
                                   errors.ECODE_ENVIRON)
......
5814 5817
                                 errors.ECODE_INVAL)
5815 5818

  
5816 5819
    # Boolean value that tells us whether we might be demoting from MC
5817
    self.might_demote = (self.op.master_candidate == False or
5818
                         self.op.offline == True or
5819
                         self.op.drained == True or
5820
                         self.op.master_capable == False)
5820
    self.might_demote = (self.op.master_candidate is False or
5821
                         self.op.offline is True or
5822
                         self.op.drained is True or
5823
                         self.op.master_capable is False)
5821 5824

  
5822 5825
    if self.op.secondary_ip:
5823 5826
      if not netutils.IP4Address.IsValid(self.op.secondary_ip):
......
5918 5921
                                 " it a master candidate" % node.name,
5919 5922
                                 errors.ECODE_STATE)
5920 5923

  
5921
    if self.op.vm_capable == False:
5924
    if self.op.vm_capable is False:
5922 5925
      (ipri, isec) = self.cfg.GetNodeInstances(self.op.node_name)
5923 5926
      if ipri or isec:
5924 5927
        raise errors.OpPrereqError("Node %s hosts instances, cannot unset"
......
5944 5947

  
5945 5948
    # Check for ineffective changes
5946 5949
    for attr in self._FLAGS:
5947
      if (getattr(self.op, attr) == False and getattr(node, attr) == False):
5950
      if (getattr(self.op, attr) is False and getattr(node, attr) is False):
5948 5951
        self.LogInfo("Ignoring request to unset flag %s, already unset", attr)
5949 5952
        setattr(self.op, attr, None)
5950 5953

  
......
5954 5957
    # TODO: We might query the real power state if it supports OOB
5955 5958
    if _SupportsOob(self.cfg, node):
5956 5959
      if self.op.offline is False and not (node.powered or
5957
                                           self.op.powered == True):
5960
                                           self.op.powered is True):
5958 5961
        raise errors.OpPrereqError(("Node %s needs to be turned on before its"
5959 5962
                                    " offline status can be reset") %
5960 5963
                                   self.op.node_name, errors.ECODE_STATE)
......
5965 5968
                                 errors.ECODE_STATE)
5966 5969

  
5967 5970
    # If we're being deofflined/drained, we'll MC ourself if needed
5968
    if (self.op.drained == False or self.op.offline == False or
5971
    if (self.op.drained is False or self.op.offline is False or
5969 5972
        (self.op.master_capable and not node.master_capable)):
5970 5973
      if _DecideSelfPromotion(self):
5971 5974
        self.op.master_candidate = True
5972 5975
        self.LogInfo("Auto-promoting node to master candidate")
5973 5976

  
5974 5977
    # If we're no longer master capable, we'll demote ourselves from MC
5975
    if self.op.master_capable == False and node.master_candidate:
5978
    if self.op.master_capable is False and node.master_candidate:
5976 5979
      self.LogInfo("Demoting from master candidate")
5977 5980
      self.op.master_candidate = False
5978 5981

  
......
8279 8282
                                  ial.required_nodes), errors.ECODE_FAULT)
8280 8283
    self.target_node = ial.result[0]
8281 8284
    self.lu.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
8282
                 self.instance_name, self.lu.op.iallocator,
8283
                 utils.CommaJoin(ial.result))
8285
                    self.instance_name, self.lu.op.iallocator,
8286
                    utils.CommaJoin(ial.result))
8284 8287

  
8285 8288
  def _WaitUntilSync(self):
8286 8289
    """Poll with custom rpc for disk sync.
......
8450 8453
      # Don't raise an exception here, as we still have to try to revert the
8451 8454
      # disk status, even if this step failed.
8452 8455

  
8453
    abort_result = self.rpc.call_instance_finalize_migration_src(source_node,
8454
        instance, False, self.live)
8456
    abort_result = self.rpc.call_instance_finalize_migration_src(
8457
      source_node, instance, False, self.live)
8455 8458
    abort_msg = abort_result.fail_msg
8456 8459
    if abort_msg:
8457 8460
      logging.error("Aborting migration failed on source node %s: %s",
......
8885 8888
  }
8886 8889

  
8887 8890

  
8888
def _GenerateDiskTemplate(lu, template_name, instance_name, primary_node,
8889
    secondary_nodes, disk_info, file_storage_dir, file_driver, base_index,
8890
    feedback_fn, full_disk_params, _req_file_storage=opcodes.RequireFileStorage,
8891
    _req_shr_file_storage=opcodes.RequireSharedFileStorage):
8891
def _GenerateDiskTemplate(
8892
  lu, template_name, instance_name, primary_node, secondary_nodes,
8893
  disk_info, file_storage_dir, file_driver, base_index,
8894
  feedback_fn, full_disk_params, _req_file_storage=opcodes.RequireFileStorage,
8895
  _req_shr_file_storage=opcodes.RequireSharedFileStorage):
8892 8896
  """Generate the entire disk layout for a given template type.
8893 8897

  
8894 8898
  """
......
9825 9829
    enabled_hvs = cluster.enabled_hypervisors
9826 9830
    if self.op.hypervisor not in enabled_hvs:
9827 9831
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
9828
                                 " cluster (%s)" % (self.op.hypervisor,
9829
                                  ",".join(enabled_hvs)),
9832
                                 " cluster (%s)" %
9833
                                 (self.op.hypervisor, ",".join(enabled_hvs)),
9830 9834
                                 errors.ECODE_STATE)
9831 9835

  
9832 9836
    # Check tag validity
......
10547 10551
        assert not self.needed_locks[locking.LEVEL_NODE]
10548 10552

  
10549 10553
        # Lock member nodes of all locked groups
10550
        self.needed_locks[locking.LEVEL_NODE] = [node_name
10551
          for group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
10552
          for node_name in self.cfg.GetNodeGroup(group_uuid).members]
10554
        self.needed_locks[locking.LEVEL_NODE] = \
10555
            [node_name
10556
             for group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
10557
             for node_name in self.cfg.GetNodeGroup(group_uuid).members]
10553 10558
      else:
10554 10559
        self._LockInstancesNodes()
10555 10560
    elif level == locking.LEVEL_NODE_RES:
......
12337 12342
    if self.op.hvparams:
12338 12343
      _CheckGlobalHvParams(self.op.hvparams)
12339 12344

  
12340
    self.op.disks = \
12341
      self._UpgradeDiskNicMods("disk", self.op.disks,
12342
        opcodes.OpInstanceSetParams.TestDiskModifications)
12343
    self.op.nics = \
12344
      self._UpgradeDiskNicMods("NIC", self.op.nics,
12345
        opcodes.OpInstanceSetParams.TestNicModifications)
12345
    self.op.disks = self._UpgradeDiskNicMods(
12346
      "disk", self.op.disks, opcodes.OpInstanceSetParams.TestDiskModifications)
12347
    self.op.nics = self._UpgradeDiskNicMods(
12348
      "NIC", self.op.nics, opcodes.OpInstanceSetParams.TestNicModifications)
12346 12349

  
12347 12350
    # Check disk modifications
12348 12351
    self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
......
12642 12645
                           " free memory information" % pnode)
12643 12646
        elif instance_info.fail_msg:
12644 12647
          self.warn.append("Can't get instance runtime information: %s" %
12645
                          instance_info.fail_msg)
12648
                           instance_info.fail_msg)
12646 12649
        else:
12647 12650
          if instance_info.payload:
12648 12651
            current_mem = int(instance_info.payload["memory"])
......
12694 12697
            self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
12695 12698
        raise errors.OpPrereqError("Instance %s must have memory between %d"
12696 12699
                                   " and %d MB of memory unless --force is"
12697
                                   " given" % (instance.name,
12700
                                   " given" %
12701
                                   (instance.name,
12698 12702
                                    self.be_proposed[constants.BE_MINMEM],
12699 12703
                                    self.be_proposed[constants.BE_MAXMEM]),
12700 12704
                                   errors.ECODE_INVAL)
......
15202 15206
                       ht.TItems([ht.TNonEmptyString,
15203 15207
                                  ht.TNonEmptyString,
15204 15208
                                  ht.TListOf(ht.TNonEmptyString),
15205
                                 ])))
15209
                                  ])))
15206 15210
  _NEVAC_FAILED = \
15207 15211
    ht.TListOf(ht.TAnd(ht.TIsLength(2),
15208 15212
                       ht.TItems([ht.TNonEmptyString,
15209 15213
                                  ht.TMaybeString,
15210
                                 ])))
15214
                                  ])))
15211 15215
  _NEVAC_RESULT = ht.TAnd(ht.TIsLength(3),
15212 15216
                          ht.TItems([_NEVAC_MOVED, _NEVAC_FAILED, _JOB_LIST]))
15213 15217

  
b/lib/confd/client.py
1 1
#
2 2
#
3 3

  
4
# Copyright (C) 2009, 2010 Google Inc.
4
# Copyright (C) 2009, 2010, 2012 Google Inc.
5 5
#
6 6
# This program is free software; you can redistribute it and/or modify
7 7
# it under the terms of the GNU General Public License as published by
......
283 283
                                        server_port=port,
284 284
                                        extra_args=rq.args,
285 285
                                        client=self,
286
                                       )
286
                                        )
287 287
      self._callback(client_reply)
288 288

  
289 289
    finally:
b/lib/config.py
1054 1054

  
1055 1055
    """
1056 1056
    cluster = self._config_data.cluster
1057
    result = objects.MasterNetworkParameters(name=cluster.master_node,
1058
      ip=cluster.master_ip,
1059
      netmask=cluster.master_netmask,
1060
      netdev=cluster.master_netdev,
1057
    result = objects.MasterNetworkParameters(
1058
      name=cluster.master_node, ip=cluster.master_ip,
1059
      netmask=cluster.master_netmask, netdev=cluster.master_netdev,
1061 1060
      ip_family=cluster.primary_ip_family)
1062 1061

  
1063 1062
    return result
b/lib/constants.py
1816 1816
    HV_CPU_MASK: CPU_PINNING_ALL,
1817 1817
    HV_CPU_TYPE: "",
1818 1818
    },
1819
  HT_FAKE: {
1820
    },
1819
  HT_FAKE: {},
1821 1820
  HT_CHROOT: {
1822 1821
    HV_INIT_SCRIPT: "/ganeti-chroot",
1823 1822
    },
......
1870 1869
  LD_LV: {
1871 1870
    LDP_STRIPES: _autoconf.LVM_STRIPECOUNT
1872 1871
    },
1873
  LD_FILE: {
1874
    },
1875
  LD_BLOCKDEV: {
1876
    },
1872
  LD_FILE: {},
1873
  LD_BLOCKDEV: {},
1877 1874
  LD_RBD: {
1878 1875
    LDP_POOL: "rbd"
1879 1876
    },
......
1903 1900
    DRBD_MAX_RATE: _DRBD_DEFAULTS[LDP_MAX_RATE],
1904 1901
    DRBD_MIN_RATE: _DRBD_DEFAULTS[LDP_MIN_RATE],
1905 1902
    },
1906
  DT_DISKLESS: {
1907
    },
1908
  DT_FILE: {
1909
    },
1910
  DT_SHARED_FILE: {
1911
    },
1912
  DT_BLOCK: {
1913
    },
1903
  DT_DISKLESS: {},
1904
  DT_FILE: {},
1905
  DT_SHARED_FILE: {},
1906
  DT_BLOCK: {},
1914 1907
  DT_RBD: {
1915 1908
    RBD_POOL: DISK_LD_DEFAULTS[LD_RBD][LDP_POOL]
1916 1909
    },
b/lib/daemon.py
1 1
#
2 2
#
3 3

  
4
# Copyright (C) 2006, 2007, 2008, 2010, 2011 Google Inc.
4
# Copyright (C) 2006, 2007, 2008, 2010, 2011, 2012 Google Inc.
5 5
#
6 6
# This program is free software; you can redistribute it and/or modify
7 7
# it under the terms of the GNU General Public License as published by
......
420 420

  
421 421
    """
422 422
    GanetiBaseAsyncoreDispatcher.__init__(self)
423
    assert signal_fn == None or callable(signal_fn)
423
    assert signal_fn is None or callable(signal_fn)
424 424
    (self.in_socket, self.out_socket) = socket.socketpair(socket.AF_UNIX,
425 425
                                                          socket.SOCK_STREAM)
426 426
    self.in_socket.setblocking(0)
b/lib/http/server.py
1 1
#
2 2
#
3 3

  
4
# Copyright (C) 2007, 2008, 2010 Google Inc.
4
# Copyright (C) 2007, 2008, 2010, 2012 Google Inc.
5 5
#
6 6
# This program is free software; you can redistribute it and/or modify
7 7
# it under the terms of the GNU General Public License as published by
......
202 202

  
203 203
      if version_number >= (2, 0):
204 204
        raise http.HttpVersionNotSupported("Invalid HTTP Version (%s)" %
205
                                      base_version_number)
205
                                           base_version_number)
206 206

  
207 207
    elif len(words) == 2:
208 208
      version = http.HTTP_0_9
b/lib/hypervisor/hv_base.py
1 1
#
2 2
#
3 3

  
4
# Copyright (C) 2006, 2007, 2008, 2009, 2010 Google Inc.
4
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2012 Google Inc.
5 5
#
6 6
# This program is free software; you can redistribute it and/or modify
7 7
# it under the terms of the GNU General Public License as published by
......
81 81

  
82 82
# must be afile
83 83
_FILE_CHECK = (utils.IsNormAbsPath, "must be an absolute normalized path",
84
              os.path.isfile, "not found or not a file")
84
               os.path.isfile, "not found or not a file")
85 85

  
86 86
# must be a directory
87 87
_DIR_CHECK = (utils.IsNormAbsPath, "must be an absolute normalized path",
88
             os.path.isdir, "not found or not a directory")
88
              os.path.isdir, "not found or not a directory")
89 89

  
90 90
# CPU mask must be well-formed
91 91
# TODO: implement node level check for the CPU mask
b/lib/hypervisor/hv_kvm.py
446 446
       None, None),
447 447
    constants.HV_KVM_SPICE_PASSWORD_FILE: hv_base.OPT_FILE_CHECK,
448 448
    constants.HV_KVM_SPICE_LOSSLESS_IMG_COMPR:
449
      hv_base.ParamInSet(False,
450
        constants.HT_KVM_SPICE_VALID_LOSSLESS_IMG_COMPR_OPTIONS),
449
      hv_base.ParamInSet(
450
        False, constants.HT_KVM_SPICE_VALID_LOSSLESS_IMG_COMPR_OPTIONS),
451 451
    constants.HV_KVM_SPICE_JPEG_IMG_COMPR:
452
      hv_base.ParamInSet(False,
453
        constants.HT_KVM_SPICE_VALID_LOSSY_IMG_COMPR_OPTIONS),
452
      hv_base.ParamInSet(
453
        False, constants.HT_KVM_SPICE_VALID_LOSSY_IMG_COMPR_OPTIONS),
454 454
    constants.HV_KVM_SPICE_ZLIB_GLZ_IMG_COMPR:
455
      hv_base.ParamInSet(False,
456
        constants.HT_KVM_SPICE_VALID_LOSSY_IMG_COMPR_OPTIONS),
455
      hv_base.ParamInSet(
456
        False, constants.HT_KVM_SPICE_VALID_LOSSY_IMG_COMPR_OPTIONS),
457 457
    constants.HV_KVM_SPICE_STREAMING_VIDEO_DETECTION:
458
      hv_base.ParamInSet(False,
459
        constants.HT_KVM_SPICE_VALID_VIDEO_STREAM_DETECTION_OPTIONS),
458
      hv_base.ParamInSet(
459
        False, constants.HT_KVM_SPICE_VALID_VIDEO_STREAM_DETECTION_OPTIONS),
460 460
    constants.HV_KVM_SPICE_AUDIO_COMPR: hv_base.NO_CHECK,
461 461
    constants.HV_KVM_SPICE_USE_TLS: hv_base.NO_CHECK,
462 462
    constants.HV_KVM_SPICE_TLS_CIPHERS: hv_base.NO_CHECK,
......
787 787
  def _VerifyAffinityPackage():
788 788
    if affinity is None:
789 789
      raise errors.HypervisorError("affinity Python package not"
790
        " found; cannot use CPU pinning under KVM")
790
                                   " found; cannot use CPU pinning under KVM")
791 791

  
792 792
  @staticmethod
793 793
  def _BuildAffinityCpuMask(cpu_list):
......
833 833
        # If CPU pinning has one non-all entry, map the entire VM to
834 834
        # one set of physical CPUs
835 835
        cls._VerifyAffinityPackage()
836
        affinity.set_process_affinity_mask(process_id,
837
          cls._BuildAffinityCpuMask(all_cpu_mapping))
836
        affinity.set_process_affinity_mask(
837
          process_id, cls._BuildAffinityCpuMask(all_cpu_mapping))
838 838
    else:
839 839
      # The number of vCPUs mapped should match the number of vCPUs
840 840
      # reported by KVM. This was already verified earlier, so
......
845 845
      # For each vCPU, map it to the proper list of physical CPUs
846 846
      for vcpu, i in zip(cpu_list, range(len(cpu_list))):
847 847
        affinity.set_process_affinity_mask(thread_dict[i],
848
          cls._BuildAffinityCpuMask(vcpu))
848
                                           cls._BuildAffinityCpuMask(vcpu))
849 849

  
850 850
  def _GetVcpuThreadIds(self, instance_name):
851 851
    """Get a mapping of vCPU no. to thread IDs for the instance
......
1184 1184

  
1185 1185
      spice_arg = "addr=%s" % spice_address
1186 1186
      if hvp[constants.HV_KVM_SPICE_USE_TLS]:
1187
        spice_arg = "%s,tls-port=%s,x509-cacert-file=%s" % (spice_arg,
1188
            instance.network_port, constants.SPICE_CACERT_FILE)
1189
        spice_arg = "%s,x509-key-file=%s,x509-cert-file=%s" % (spice_arg,
1190
            constants.SPICE_CERT_FILE, constants.SPICE_CERT_FILE)
1187
        spice_arg = ("%s,tls-port=%s,x509-cacert-file=%s" %
1188
                     (spice_arg, instance.network_port,
1189
                      constants.SPICE_CACERT_FILE))
1190
        spice_arg = ("%s,x509-key-file=%s,x509-cert-file=%s" %
1191
                     (spice_arg, constants.SPICE_CERT_FILE,
1192
                      constants.SPICE_CERT_FILE))
1191 1193
        tls_ciphers = hvp[constants.HV_KVM_SPICE_TLS_CIPHERS]
1192 1194
        if tls_ciphers:
1193 1195
          spice_arg = "%s,tls-ciphers=%s" % (spice_arg, tls_ciphers)
......
1385 1387
            tap_extra = ",vhost=on"
1386 1388
          else:
1387 1389
            raise errors.HypervisorError("vhost_net is configured"
1388
                                        " but it is not available")
1390
                                         " but it is not available")
1389 1391
      else:
1390 1392
        nic_model = nic_type
1391 1393

  
......
1427 1429
    if (v_major, v_min) >= (0, 14):
1428 1430
      logging.debug("Enabling QMP")
1429 1431
      kvm_cmd.extend(["-qmp", "unix:%s,server,nowait" %
1430
                    self._InstanceQmpMonitor(instance.name)])
1432
                      self._InstanceQmpMonitor(instance.name)])
1431 1433

  
1432 1434
    # Configure the network now for starting instances and bridged interfaces,
1433 1435
    # during FinalizeMigration for incoming instances' routed interfaces
......
1712 1714
      self._CallMonitorCommand(instance_name, "stop")
1713 1715

  
1714 1716
    migrate_command = ("migrate_set_speed %dm" %
1715
        instance.hvparams[constants.HV_MIGRATION_BANDWIDTH])
1717
                       instance.hvparams[constants.HV_MIGRATION_BANDWIDTH])
1716 1718
    self._CallMonitorCommand(instance_name, migrate_command)
1717 1719

  
1718 1720
    migrate_command = ("migrate_set_downtime %dms" %
1719
        instance.hvparams[constants.HV_MIGRATION_DOWNTIME])
1721
                       instance.hvparams[constants.HV_MIGRATION_DOWNTIME])
1720 1722
    self._CallMonitorCommand(instance_name, migrate_command)
1721 1723

  
1722 1724
    migrate_command = "migrate -d tcp:%s:%s" % (target, port)
......
1777 1779
      time.sleep(self._MIGRATION_INFO_RETRY_DELAY)
1778 1780

  
1779 1781
    return objects.MigrationStatus(status=constants.HV_MIGRATION_FAILED,
1780
                                  info="Too many 'info migrate' broken answers")
1782
                                   info="Too many 'info migrate'"
1783
                                   " broken answers")
1781 1784

  
1782 1785
  def BalloonInstanceMemory(self, instance, mem):
1783 1786
    """Balloon an instance memory to a certain value.
b/lib/locking.py
1 1
#
2 2
#
3 3

  
4
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
4
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012 Google Inc.
5 5
#
6 6
# This program is free software; you can redistribute it and/or modify
7 7
# it under the terms of the GNU General Public License as published by
......
1637 1637
    # point in acquiring any other lock, unless perhaps we are half way through
1638 1638
    # the migration of the current opcode.
1639 1639
    assert (self._contains_BGL(level, names) or self._BGL_owned()), (
1640
            "You must own the Big Ganeti Lock before acquiring any other")
1640
      "You must own the Big Ganeti Lock before acquiring any other")
1641 1641

  
1642 1642
    # Check we don't own locks at the same or upper levels.
1643 1643
    assert not self._upper_owned(level), ("Cannot acquire locks at a level"
1644
           " while owning some at a greater one")
1644
                                          " while owning some at a greater one")
1645 1645

  
1646 1646
    # Acquire the locks in the set.
1647 1647
    return self.__keyring[level].acquire(names, shared=shared, timeout=timeout,
......
1679 1679
    assert level in LEVELS, "Invalid locking level %s" % level
1680 1680
    assert (not self._contains_BGL(level, names) or
1681 1681
            not self._upper_owned(LEVEL_CLUSTER)), (
1682
            "Cannot release the Big Ganeti Lock while holding something"
1683
            " at upper levels (%r)" %
1684
            (utils.CommaJoin(["%s=%r" % (LEVEL_NAMES[i], self.list_owned(i))
1685
                              for i in self.__keyring.keys()]), ))
1682
              "Cannot release the Big Ganeti Lock while holding something"
1683
              " at upper levels (%r)" %
1684
              (utils.CommaJoin(["%s=%r" % (LEVEL_NAMES[i], self.list_owned(i))
1685
                                for i in self.__keyring.keys()]), ))
1686 1686

  
1687 1687
    # Release will complain if we don't own the locks already
1688 1688
    return self.__keyring[level].release(names)
......
1702 1702
    """
1703 1703
    assert level in LEVELS_MOD, "Invalid or immutable level %s" % level
1704 1704
    assert self._BGL_owned(), ("You must own the BGL before performing other"
1705
           " operations")
1705
                               " operations")
1706 1706
    assert not self._upper_owned(level), ("Cannot add locks at a level"
1707
           " while owning some at a greater one")
1707
                                          " while owning some at a greater one")
1708 1708
    return self.__keyring[level].add(names, acquired=acquired, shared=shared)
1709 1709

  
1710 1710
  def remove(self, level, names):
......
1722 1722
    """
1723 1723
    assert level in LEVELS_MOD, "Invalid or immutable level %s" % level
1724 1724
    assert self._BGL_owned(), ("You must own the BGL before performing other"
1725
           " operations")
1725
                               " operations")
1726 1726
    # Check we either own the level or don't own anything from here
1727 1727
    # up. LockSet.remove() will check the case in which we don't own
1728 1728
    # all the needed resources, or we have a shared ownership.
b/lib/mcpu.py
1 1
#
2 2
#
3 3

  
4
# Copyright (C) 2006, 2007, 2011 Google Inc.
4
# Copyright (C) 2006, 2007, 2011, 2012 Google Inc.
5 5
#
6 6
# This program is free software; you can redistribute it and/or modify
7 7
# it under the terms of the GNU General Public License as published by
......
527 527

  
528 528
class HooksMaster(object):
529 529
  def __init__(self, opcode, hooks_path, nodes, hooks_execution_fn,
530
    hooks_results_adapt_fn, build_env_fn, log_fn, htype=None, cluster_name=None,
531
    master_name=None):
530
               hooks_results_adapt_fn, build_env_fn, log_fn, htype=None,
531
               cluster_name=None, master_name=None):
532 532
    """Base class for hooks masters.
533 533

  
534 534
    This class invokes the execution of hooks according to the behaviour
b/lib/objects.py
1098 1098
        GetVolumeList()
1099 1099

  
1100 1100
    """
1101
    if node == None:
1101
    if node is None:
1102 1102
      node = self.primary_node
1103 1103

  
1104 1104
    if lvmap is None:
......
1557 1557
    # code can be removed once upgrading straight from 2.0 is deprecated.
1558 1558
    if self.default_hypervisor is not None:
1559 1559
      self.enabled_hypervisors = ([self.default_hypervisor] +
1560
        [hvname for hvname in self.enabled_hypervisors
1561
         if hvname != self.default_hypervisor])
1560
                                  [hvname for hvname in self.enabled_hypervisors
1561
                                   if hvname != self.default_hypervisor])
1562 1562
      self.default_hypervisor = None
1563 1563

  
1564 1564
    # maintain_node_health added after 2.1.1
......
1970 1970
  @ivar fields: List of L{QueryFieldDefinition} objects
1971 1971

  
1972 1972
  """
1973
  __slots__ = [
1974
    ]
1973
  __slots__ = []
1975 1974

  
1976 1975

  
1977 1976
class MigrationStatus(ConfigObject):
b/lib/ovf.py
187 187
    except OSError, err:
188 188
      if err.errno == errno.EEXIST:
189 189
        new_path = utils.PathJoin(directory,
190
          "%s_%s%s" % (prefix, counter, suffix))
190
                                  "%s_%s%s" % (prefix, counter, suffix))
191 191
        counter += 1
192 192
      else:
193 193
        raise errors.OpPrereqError("Error moving the file %s to %s location:"
......
357 357
          sha1_sum = match.group(2)
358 358
          manifest_files[file_name] = sha1_sum
359 359
      files_with_paths = [utils.PathJoin(self.input_dir, file_name)
360
        for file_name in self.files_list]
360
                          for file_name in self.files_list]
361 361
      sha1_sums = utils.FingerprintFiles(files_with_paths)
362 362
      for file_name, value in manifest_files.iteritems():
363 363
        if sha1_sums.get(utils.PathJoin(self.input_dir, file_name)) != value:
......
401 401
      return {"hypervisor_name": constants.VALUE_AUTO}
402 402
    results = {
403 403
      "hypervisor_name": hypervisor_data.findtext("{%s}Name" % GANETI_SCHEMA,
404
                           default=constants.VALUE_AUTO),
404
                                                  default=constants.VALUE_AUTO),
405 405
    }
406 406
    parameters = hypervisor_data.find("{%s}Parameters" % GANETI_SCHEMA)
407 407
    results.update(self._GetDictParameters(parameters, GANETI_SCHEMA))
......
440 440
    vcpus = self._GetElementMatchingText(find_vcpus, match_vcpus)
441 441
    if vcpus:
442 442
      vcpus_count = vcpus.findtext("{%s}VirtualQuantity" % RASD_SCHEMA,
443
        default=constants.VALUE_AUTO)
443
                                   default=constants.VALUE_AUTO)
444 444
    else:
445 445
      vcpus_count = constants.VALUE_AUTO
446 446
    results["vcpus"] = str(vcpus_count)
......
451 451
    memory_raw = None
452 452
    if memory:
453 453
      alloc_units = memory.findtext("{%s}AllocationUnits" % RASD_SCHEMA)
454
      matching_units = [units for units, variants in
455
        ALLOCATION_UNITS.iteritems() if alloc_units.lower() in variants]
454
      matching_units = [units for units, variants in ALLOCATION_UNITS.items()
455
                        if alloc_units.lower() in variants]
456 456
      if matching_units == []:
457 457
        raise errors.OpPrereqError("Unit %s for RAM memory unknown" %
458 458
                                   alloc_units, errors.ECODE_INVAL)
459 459
      units = matching_units[0]
460 460
      memory_raw = int(memory.findtext("{%s}VirtualQuantity" % RASD_SCHEMA,
461
            default=constants.VALUE_AUTO))
461
                                       default=constants.VALUE_AUTO))
462 462
      memory_count = CONVERT_UNITS_TO_MB[units](memory_raw)
463 463
    else:
464 464
      memory_count = constants.VALUE_AUTO
465 465
    results["memory"] = str(memory_count)
466 466

  
467 467
    find_balance = ("{%s}GanetiSection/{%s}AutoBalance" %
468
                   (GANETI_SCHEMA, GANETI_SCHEMA))
468
                    (GANETI_SCHEMA, GANETI_SCHEMA))
469 469
    balance = self.tree.findtext(find_balance, default=constants.VALUE_AUTO)
470 470
    results["auto_balance"] = balance
471 471

  
......
513 513
    networks_search = ("{%s}NetworkSection/{%s}Network" %
514 514
                       (OVF_SCHEMA, OVF_SCHEMA))
515 515
    network_names = self._GetAttributes(networks_search,
516
      "{%s}name" % OVF_SCHEMA)
516
                                        "{%s}name" % OVF_SCHEMA)
517 517
    required = ["ip", "mac", "link", "mode"]
518 518
    for (counter, network_name) in enumerate(network_names):
519 519
      network_search = ("{%s}VirtualSystem/{%s}VirtualHardwareSection/{%s}Item"
......
524 524
      ganeti_match = ("{%s}name" % OVF_SCHEMA, network_name)
525 525
      network_data = self._GetElementMatchingText(network_search, network_match)
526 526
      network_ganeti_data = self._GetElementMatchingAttr(ganeti_search,
527
        ganeti_match)
527
                                                         ganeti_match)
528 528

  
529 529
      ganeti_data = {}
530 530
      if network_ganeti_data:
......
703 703
      SubElementText(network_item, "rasd:ElementName", network_name)
704 704
      SubElementText(network_item, "rasd:InstanceID", self.next_instance_id)
705 705
      SubElementText(network_item, "rasd:ResourceType",
706
        RASD_TYPE["ethernet-adapter"])
706
                     RASD_TYPE["ethernet-adapter"])
707 707
      self.hardware_list.append(network_item)
708 708
      self.next_instance_id += 1
709 709

  
......
737 737

  
738 738
    SubElementText(ganeti_section, "gnt:Version", ganeti.get("version"))
739 739
    SubElementText(ganeti_section, "gnt:DiskTemplate",
740
      ganeti.get("disk_template"))
740
                   ganeti.get("disk_template"))
741 741
    SubElementText(ganeti_section, "gnt:AutoBalance",
742
      ganeti.get("auto_balance"))
742
                   ganeti.get("auto_balance"))
743 743
    SubElementText(ganeti_section, "gnt:Tags", ganeti.get("tags"))
744 744

  
745 745
    osys = ET.SubElement(ganeti_section, "gnt:OperatingSystem")
......
779 779
    name_section.text = name
780 780
    os_attrib = {"ovf:id": "0"}
781 781
    os_section = ET.SubElement(virtual_system, "OperatingSystemSection",
782
      attrib=os_attrib)
782
                               attrib=os_attrib)
783 783
    SubElementText(os_section, "Info", "Installed guest operating system")
784 784
    hardware_section = ET.SubElement(virtual_system, "VirtualHardwareSection")
785 785
    SubElementText(hardware_section, "Info", "Virtual hardware requirements")
......
794 794
    # Item for vcpus
795 795
    vcpus_item = ET.SubElement(hardware_section, "Item")
796 796
    SubElementText(vcpus_item, "rasd:ElementName",
797
      "%s virtual CPU(s)" % vcpus)
797
                   "%s virtual CPU(s)" % vcpus)
798 798
    SubElementText(vcpus_item, "rasd:InstanceID", INSTANCE_ID["vcpus"])
799 799
    SubElementText(vcpus_item, "rasd:ResourceType", RASD_TYPE["vcpus"])
800 800
    SubElementText(vcpus_item, "rasd:VirtualQuantity", vcpus)
......
909 909
    elif action == COMPRESS:
910 910
      prefix = disk_file
911 911
    new_path = utils.GetClosedTempfile(suffix=COMPRESSION_EXT, prefix=prefix,
912
      dir=self.output_dir)
912
                                       dir=self.output_dir)
913 913
    self.temp_file_manager.Add(new_path)
914 914
    args = ["gzip", "-c", disk_path]
915 915
    run_result = utils.RunCmd(args, output=new_path)
......
940 940
      logging.warning("Conversion of disk image to %s format, this may take"
941 941
                      " a while", disk_format)
942 942

  
943
    new_disk_path = utils.GetClosedTempfile(suffix=".%s" % disk_format,
944
      prefix=disk_name, dir=self.output_dir)
943
    new_disk_path = utils.GetClosedTempfile(
944
      suffix=".%s" % disk_format, prefix=disk_name, dir=self.output_dir)
945 945
    self.temp_file_manager.Add(new_disk_path)
946 946
    args = [
947 947
      constants.QEMUIMG_PATH,
......
1122 1122
      raise errors.OpPrereqError("No %s file in %s package found" %
1123 1123
                                 (OVF_EXT, OVA_EXT), errors.ECODE_ENVIRON)
1124 1124
    logging.warning("Unpacking the %s archive, this may take a while",
1125
      input_path)
1125
                    input_path)
1126 1126
    self.input_dir = temp_dir
1127 1127
    self.input_path = utils.PathJoin(self.temp_dir, input_name)
1128 1128
    try:
......
1150 1150

  
1151 1151
    """
1152 1152
    self.results_name = self._GetInfo("instance name", self.options.name,
1153
      self._ParseNameOptions, self.ovf_reader.GetInstanceName)
1153
                                      self._ParseNameOptions,
1154
                                      self.ovf_reader.GetInstanceName)
1154 1155
    if not self.results_name:
1155 1156
      raise errors.OpPrereqError("Name of instance not provided",
1156 1157
                                 errors.ECODE_INVAL)
......
1162 1163
      raise errors.OpPrereqError("Failed to create directory %s: %s" %
1163 1164
                                 (self.output_dir, err), errors.ECODE_ENVIRON)
1164 1165

  
1165
    self.results_template = self._GetInfo("disk template",
1166
      self.options.disk_template, self._ParseTemplateOptions,
1166
    self.results_template = self._GetInfo(
1167
      "disk template", self.options.disk_template, self._ParseTemplateOptions,
1167 1168
      self.ovf_reader.GetDiskTemplate)
1168 1169
    if not self.results_template:
1169 1170
      logging.info("Disk template not given")
1170 1171

  
1171
    self.results_hypervisor = self._GetInfo("hypervisor",
1172
      self.options.hypervisor, self._ParseHypervisorOptions,
1172
    self.results_hypervisor = self._GetInfo(
1173
      "hypervisor", self.options.hypervisor, self._ParseHypervisorOptions,
1173 1174
      self.ovf_reader.GetHypervisorData)
1174 1175
    assert self.results_hypervisor["hypervisor_name"]
1175 1176
    if self.results_hypervisor["hypervisor_name"] == constants.VALUE_AUTO:
1176 1177
      logging.debug("Default hypervisor settings from the cluster will be used")
1177 1178

  
1178
    self.results_os = self._GetInfo("OS", self.options.os,
1179
      self._ParseOSOptions, self.ovf_reader.GetOSData)
1179
    self.results_os = self._GetInfo(
1180
      "OS", self.options.os, self._ParseOSOptions, self.ovf_reader.GetOSData)
1180 1181
    if not self.results_os.get("os_name"):
1181 1182
      raise errors.OpPrereqError("OS name must be provided",
1182 1183
                                 errors.ECODE_INVAL)
1183 1184

  
1184
    self.results_backend = self._GetInfo("backend", self.options.beparams,
1185
    self.results_backend = self._GetInfo(
1186
      "backend", self.options.beparams,
1185 1187
      self._ParseBackendOptions, self.ovf_reader.GetBackendData)
1186 1188
    assert self.results_backend.get("vcpus")
1187 1189
    assert self.results_backend.get("memory")
1188 1190
    assert self.results_backend.get("auto_balance") is not None
1189 1191

  
1190
    self.results_tags = self._GetInfo("tags", self.options.tags,
1191
      self._ParseTags, self.ovf_reader.GetTagsData)
1192
    self.results_tags = self._GetInfo(
1193
      "tags", self.options.tags, self._ParseTags, self.ovf_reader.GetTagsData)
1192 1194

  
1193 1195
    ovf_version = self.ovf_reader.GetVersionData()
1194 1196
    if ovf_version:
......
1196 1198
    else:
1197 1199
      self.results_version = constants.EXPORT_VERSION
1198 1200

  
1199
    self.results_network = self._GetInfo("network", self.options.nics,
1200
      self._ParseNicOptions, self.ovf_reader.GetNetworkData,
1201
      ignore_test=self.options.no_nics)
1201
    self.results_network = self._GetInfo(
1202
      "network", self.options.nics, self._ParseNicOptions,
1203
      self.ovf_reader.GetNetworkData, ignore_test=self.options.no_nics)
1202 1204

  
1203
    self.results_disk = self._GetInfo("disk", self.options.disks,
1204
      self._ParseDiskOptions, self._GetDiskInfo,
1205
    self.results_disk = self._GetInfo(
1206
      "disk", self.options.disks, self._ParseDiskOptions, self._GetDiskInfo,
1205 1207
      ignore_test=self.results_template == constants.DT_DISKLESS)
1206 1208

  
1207 1209
    if not self.results_disk and not self.results_network:
......
1211 1213

  
1212 1214
  @staticmethod
1213 1215
  def _GetInfo(name, cmd_arg, cmd_function, nocmd_function,
1214
    ignore_test=False):
1216
               ignore_test=False):
1215 1217
    """Get information about some section - e.g. disk, network, hypervisor.
1216 1218

  
1217 1219
    @type name: string
......
1232 1234
      results = cmd_function()
1233 1235
    else:
1234 1236
      logging.info("Information for %s will be parsed from %s file",
1235
        name, OVF_EXT)
1237
                   name, OVF_EXT)
1236 1238
      results = nocmd_function()
1237 1239
    logging.info("Options for %s were succesfully read", name)
1238 1240
    return results
......
1398 1400
      disk_path = utils.PathJoin(self.input_dir, disk_name)
1399 1401
      if disk_compression not in NO_COMPRESSION:
1400 1402
        _, disk_path = self._CompressDisk(disk_path, disk_compression,
1401
          DECOMPRESS)
1403
                                          DECOMPRESS)
1402 1404
        disk, _ = os.path.splitext(disk)
1403 1405
      if self._GetDiskQemuInfo(disk_path, "file format: (\S+)") != "raw":
1404 1406
        logging.info("Conversion to raw format is required")
1405 1407
      ext, new_disk_path = self._ConvertDisk("raw", disk_path)
1406 1408

  
1407 1409
      final_disk_path = LinkFile(new_disk_path, prefix=disk, suffix=ext,
1408
        directory=self.output_dir)
1410
                                 directory=self.output_dir)
1409 1411
      final_name = os.path.basename(final_disk_path)
1410 1412
      disk_size = os.path.getsize(final_disk_path) / (1024 * 1024)
1411 1413
      results["disk%s_dump" % counter] = final_name
......
1453 1455
    results[constants.INISECT_HYP].update(self.results_hypervisor)
1454 1456

  
1455 1457
    output_file_name = utils.PathJoin(self.output_dir,
1456
      constants.EXPORT_CONF_FILE)
1458
                                      constants.EXPORT_CONF_FILE)
1457 1459

  
1458 1460
    output = []
1459 1461
    for section, options in results.iteritems():
......
1480 1482
  """
1481 1483
  def get(self, section, options, raw=None, vars=None): # pylint: disable=W0622
1482 1484
    try:
1483
      result = ConfigParser.SafeConfigParser.get(self, section, options, \
1484
        raw=raw, vars=vars)
1485
      result = ConfigParser.SafeConfigParser.get(self, section, options,
1486
                                                 raw=raw, vars=vars)
1485 1487
    except ConfigParser.NoOptionError:
1486 1488
      result = None
1487 1489
    return result
......
1662 1664
        break
1663 1665
      results.append({
1664 1666
        "mode": self.config_parser.get(constants.INISECT_INS,
1665
           "nic%s_mode" % counter),
1667
                                       "nic%s_mode" % counter),
1666 1668
        "mac": self.config_parser.get(constants.INISECT_INS,
1667
           "nic%s_mac" % counter),
1669
                                      "nic%s_mac" % counter),
1668 1670
        "ip": self.config_parser.get(constants.INISECT_INS,
1669
           "nic%s_ip" % counter),
1671
                                     "nic%s_ip" % counter),
1670 1672
        "link": data_link,
1671 1673
      })
1672 1674
      if results[counter]["mode"] not in constants.NIC_VALID_MODES:
......
1698 1700
    disk_name, _ = os.path.splitext(disk_file)
1699 1701
    ext, new_disk_path = self._ConvertDisk(self.options.disk_format, disk_path)
1700 1702
    results["format"] = self.options.disk_format
1701
    results["virt-size"] = self._GetDiskQemuInfo(new_disk_path,
1702
      "virtual size: \S+ \((\d+) bytes\)")
1703
    results["virt-size"] = self._GetDiskQemuInfo(
1704
      new_disk_path, "virtual size: \S+ \((\d+) bytes\)")
1703 1705
    if compression:
1704 1706
      ext2, new_disk_path = self._CompressDisk(new_disk_path, "gzip",
1705
        COMPRESS)
1707
                                               COMPRESS)
1706 1708
      disk_name, _ = os.path.splitext(disk_name)
1707 1709
      results["compression"] = "gzip"
1708 1710
      ext += ext2
1709 1711
    final_disk_path = LinkFile(new_disk_path, prefix=disk_name, suffix=ext,
1710
      directory=self.output_dir)
1712
                               directory=self.output_dir)
1711 1713
    final_disk_name = os.path.basename(final_disk_path)
1712 1714
    results["real-size"] = os.path.getsize(final_disk_path)
1713 1715
    results["path"] = final_disk_name
......
1815 1817
      self.ovf_writer.SaveGanetiData(self.results_ganeti, self.results_network)
1816 1818

  
1817 1819
    self.ovf_writer.SaveVirtualSystemData(self.results_name, self.results_vcpus,
1818
      self.results_memory)
1820
                                          self.results_memory)
1819 1821

  
1820 1822
    data = self.ovf_writer.PrettyXmlDump()
1821 1823
    utils.WriteFile(self.output_path, data=data)
b/lib/rpc_defs.py
502 502
  ]
503 503

  
504 504
CALLS = {
505
  "RpcClientDefault": \
505
  "RpcClientDefault":
506 506
    _Prepare(_IMPEXP_CALLS + _X509_CALLS + _OS_CALLS + _NODE_CALLS +
507 507
             _FILE_STORAGE_CALLS + _MISC_CALLS + _INSTANCE_CALLS +
508 508
             _BLOCKDEV_CALLS + _STORAGE_CALLS),
b/lib/server/masterd.py
461 461

  
462 462
    # Locking manager
463 463
    self.glm = locking.GanetiLockManager(
464
                self.cfg.GetNodeList(),
465
                self.cfg.GetNodeGroupList(),
466
                self.cfg.GetInstanceList())
464
      self.cfg.GetNodeList(),
465
      self.cfg.GetNodeGroupList(),
466
      self.cfg.GetInstanceList())
467 467

  
468 468
    self.cfg.SetContext(self)
469 469

  
b/lib/server/noded.py
1059 1059
  mainloop = daemon.Mainloop()
1060 1060
  server = \
1061 1061
    http.server.HttpServer(mainloop, options.bind_address, options.port,
1062
      handler, ssl_params=ssl_params, ssl_verify_peer=True,
1063
      request_executor_class=request_executor_class)
1062
                           handler, ssl_params=ssl_params, ssl_verify_peer=True,
1063
                           request_executor_class=request_executor_class)
1064 1064
  server.Start()
1065 1065

  
1066 1066
  return (mainloop, server)
b/lib/server/rapi.py
1 1
#
2 2
#
3 3

  
4
# Copyright (C) 2006, 2007, 2008, 2009, 2010 Google Inc.
4
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2012 Google Inc.
5 5
#
6 6
# This program is free software; you can redistribute it and/or modify
7 7
# it under the terms of the GNU General Public License as published by
......
332 332

  
333 333
  server = \
334 334
    http.server.HttpServer(mainloop, options.bind_address, options.port,
335
      handler, ssl_params=options.ssl_params, ssl_verify_peer=False)
335
                           handler,
336
                           ssl_params=options.ssl_params, ssl_verify_peer=False)
336 337
  server.Start()
337 338

  
338 339
  return (mainloop, server)
......
354 355

  
355 356
  """
356 357
  parser = optparse.OptionParser(description="Ganeti Remote API",
357
                    usage="%prog [-f] [-d] [-p port] [-b ADDRESS]",
358
                    version="%%prog (ganeti) %s" % constants.RELEASE_VERSION)
358
                                 usage="%prog [-f] [-d] [-p port] [-b ADDRESS]",
359
                                 version="%%prog (ganeti) %s" %
360
                                 constants.RELEASE_VERSION)
359 361

  
360 362
  daemon.GenericMain(constants.RAPI, parser, CheckRapi, PrepRapi, ExecRapi,
361 363
                     default_ssl_cert=constants.RAPI_CERT_FILE,
b/lib/utils/io.py
1 1
#
2 2
#
3 3

  
4
# Copyright (C) 2006, 2007, 2010, 2011 Google Inc.
4
# Copyright (C) 2006, 2007, 2010, 2011, 2012 Google Inc.
5 5
#
6 6
# This program is free software; you can redistribute it and/or modify
7 7
# it under the terms of the GNU General Public License as published by
......
525 525
  """
526 526
  if not os.path.isfile(file_name):
527 527
    raise errors.ProgrammerError("Can't make a backup of a non-file '%s'" %
528
                                file_name)
... This diff was truncated because it exceeds the maximum size that can be displayed.

Also available in: Unified diff