Revision a8c931c0

b/lib/cmdlib.py
   53     53   import ganeti.masterd.instance # pylint: disable-msg=W0611
   54     54
   55     55
          56  +# need to define these here before the actual LUs
          57  +
          58  +def _EmptyList():
          59  +  """Returns an empty list.
          60  +
          61  +  """
          62  +  return []
          63  +
          64  +
          65  +def _EmptyDict():
          66  +  """Returns an empty dict.
          67  +
          68  +  """
          69  +  return {}
          70  +
          71  +
   56     72   class LogicalUnit(object):
   57     73     """Logical Unit base class.
   58     74
......
   69     85
   70     86     @ivar dry_run_result: the value (if any) that will be returned to the caller
   71     87         in dry-run mode (signalled by opcode dry_run parameter)
          88  +  @cvar _OP_DEFS: a list of opcode attributes and the defaults values
          89  +      they should get if not already existing
   72     90
   73     91     """
   74     92     HPATH = None
   75     93     HTYPE = None
   76     94     _OP_REQP = []
          95  +  _OP_DEFS = []
   77     96     REQ_BGL = True
   78     97
   79     98     def __init__(self, processor, op, context, rpc):
......
  111    130       # Tasklets
  112    131       self.tasklets = None
  113    132
         133  +    for aname, aval in self._OP_DEFS:
         134  +      if not hasattr(self.op, aname):
         135  +        if callable(aval):
         136  +          dval = aval()
         137  +        else:
         138  +          dval = aval
         139  +        setattr(self.op, aname, dval)
         140  +
  114    141       for attr_name in self._OP_REQP:
  115    142         attr_val = getattr(op, attr_name, None)
  116    143         if attr_val is None:
......
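The loop added to LogicalUnit.__init__ above is the heart of this revision: every (name, default) pair in a LU's _OP_DEFS is applied to the opcode before anything else runs, and callable defaults are invoked so each opcode receives a freshly created value. A minimal, self-contained sketch of the same pattern (FakeOp, BaseLU and LUDemo are made-up illustrative names, not Ganeti classes):

  # Sketch only: stand-ins for an opcode and a logical unit, not real Ganeti code.
  class FakeOp(object):
    """Bare attribute container playing the role of an opcode."""

  class BaseLU(object):
    _OP_DEFS = []  # (attribute name, default value or factory) pairs

    def __init__(self, op):
      self.op = op
      for aname, aval in self._OP_DEFS:
        if not hasattr(self.op, aname):
          if callable(aval):
            dval = aval()  # factories are called, producing a fresh object
          else:
            dval = aval
          setattr(self.op, aname, dval)

  class LUDemo(BaseLU):
    _OP_DEFS = [
      ("shutdown_timeout", 120),  # plain literal default
      ("nics", list),             # factory: a new list per opcode
      ]

  op = FakeOp()
  LUDemo(op)
  print op.shutdown_timeout, op.nics   # -> 120 []

Because the defaults are filled in during __init__, they are already in place by the time CheckArguments or ExpandNames runs, which is what allows the per-LU boilerplate in the hunks below to be deleted.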
 2389   2416     HPATH = "cluster-modify"
 2390   2417     HTYPE = constants.HTYPE_CLUSTER
 2391   2418     _OP_REQP = []
        2419  +  _OP_DEFS = [
        2420  +    ("candidate_pool_size", None),
        2421  +    ("uid_pool", None),
        2422  +    ("add_uids", None),
        2423  +    ("remove_uids", None),
        2424  +    ]
 2392   2425     REQ_BGL = False
 2393   2426
 2394   2427     def CheckArguments(self):
 2395   2428       """Check parameters
 2396   2429
 2397   2430       """
 2398          -    for attr in ["candidate_pool_size",
 2399          -                 "uid_pool", "add_uids", "remove_uids"]:
 2400          -      if not hasattr(self.op, attr):
 2401          -        setattr(self.op, attr, None)
 2402          -
 2403   2431       if self.op.candidate_pool_size is not None:
 2404   2432         try:
 2405   2433           self.op.candidate_pool_size = int(self.op.candidate_pool_size)
......
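Each of the remaining hunks applies the same mechanical migration to one LU: the hand-rolled hasattr/getattr defaulting inside CheckArguments disappears, and the equivalent defaults are declared once in _OP_DEFS. Roughly, reusing the BaseLU/FakeOp sketch above with a made-up LU name:

  # Before this revision: every LU defaulted its optional opcode attributes by hand.
  class LUExampleOld(BaseLU):
    def CheckArguments(self):
      for attr in ["candidate_pool_size", "uid_pool"]:
        if not hasattr(self.op, attr):
          setattr(self.op, attr, None)
      if self.op.candidate_pool_size is not None:
        self.op.candidate_pool_size = int(self.op.candidate_pool_size)

  # After this revision: declared once, applied by BaseLU.__init__.
  class LUExampleNew(BaseLU):
    _OP_DEFS = [
      ("candidate_pool_size", None),
      ("uid_pool", None),
      ]

    def CheckArguments(self):
      # the attributes are guaranteed to exist by the time we get here
      if self.op.candidate_pool_size is not None:
        self.op.candidate_pool_size = int(self.op.candidate_pool_size)

  op = FakeOp()
  lu = LUExampleNew(op)
  lu.CheckArguments()            # no hasattr() dance needed
  print op.candidate_pool_size   # -> None

A side effect visible throughout the diff is that values formerly cached on the LU itself (self.shutdown_timeout, self.export_mode, self.force, ...) are now read directly from self.op, so those duplicated instance attributes go away as well.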
 3314   3342
 3315   3343     """
 3316   3344     _OP_REQP = ["nodes", "storage_type", "output_fields"]
        3345  +  _OP_DEFS = [("name", None)]
 3317   3346     REQ_BGL = False
 3318   3347     _FIELDS_STATIC = utils.FieldSet(constants.SF_NODE)
 3319   3348
......
 3340   3369       This checks that the fields required are valid output fields.
 3341   3370
 3342   3371       """
 3343          -    self.op.name = getattr(self.op, "name", None)
 3344          -
 3345   3372       self.nodes = self.acquired_locks[locking.LEVEL_NODE]
 3346   3373
 3347   3374     def Exec(self, feedback_fn):
......
 3459   3486     HPATH = "node-add"
 3460   3487     HTYPE = constants.HTYPE_NODE
 3461   3488     _OP_REQP = ["node_name"]
        3489  +  _OP_DEFS = [("secondary_ip", None)]
 3462   3490
 3463   3491     def CheckArguments(self):
 3464   3492       # validate/normalize the node name
......
 3498   3526
 3499   3527       node = dns_data.name
 3500   3528       primary_ip = self.op.primary_ip = dns_data.ip
 3501          -    secondary_ip = getattr(self.op, "secondary_ip", None)
 3502          -    if secondary_ip is None:
 3503          -      secondary_ip = primary_ip
 3504          -    if not utils.IsValidIP(secondary_ip):
        3529  +    if self.op.secondary_ip is None:
        3530  +      self.op.secondary_ip = primary_ip
        3531  +    if not utils.IsValidIP(self.op.secondary_ip):
 3505   3532         raise errors.OpPrereqError("Invalid secondary IP given",
 3506   3533                                    errors.ECODE_INVAL)
 3507          -    self.op.secondary_ip = secondary_ip
        3534  +    secondary_ip = self.op.secondary_ip
 3508   3535
 3509   3536       node_list = cfg.GetNodeList()
 3510   3537       if not self.op.readd and node in node_list:
......
 3997   4024
 3998   4025     """
 3999   4026     _OP_REQP = ["instance_name"]
        4027  +  _OP_DEFS = [("ignore_size", False)]
 4000   4028     REQ_BGL = False
 4001   4029
 4002   4030     def ExpandNames(self):
......
 4018   4046       assert self.instance is not None, \
 4019   4047         "Cannot retrieve locked instance %s" % self.op.instance_name
 4020   4048       _CheckNodeOnline(self, self.instance.primary_node)
 4021          -    if not hasattr(self.op, "ignore_size"):
 4022          -      self.op.ignore_size = False
 4023   4049
 4024   4050     def Exec(self, feedback_fn):
 4025   4051       """Activate the disks.
......
 4305   4331     HPATH = "instance-start"
 4306   4332     HTYPE = constants.HTYPE_INSTANCE
 4307   4333     _OP_REQP = ["instance_name", "force"]
        4334  +  _OP_DEFS = [
        4335  +    ("beparams", _EmptyDict),
        4336  +    ("hvparams", _EmptyDict),
        4337  +    ]
 4308   4338     REQ_BGL = False
 4309   4339
 4310   4340     def ExpandNames(self):
......
 4334   4364         "Cannot retrieve locked instance %s" % self.op.instance_name
 4335   4365
 4336   4366       # extra beparams
 4337          -    self.beparams = getattr(self.op, "beparams", {})
 4338          -    if self.beparams:
 4339          -      if not isinstance(self.beparams, dict):
        4367  +    if self.op.beparams:
        4368  +      if not isinstance(self.op.beparams, dict):
 4340   4369           raise errors.OpPrereqError("Invalid beparams passed: %s, expected"
 4341          -                                   " dict" % (type(self.beparams), ),
        4370  +                                   " dict" % (type(self.op.beparams), ),
 4342   4371                                      errors.ECODE_INVAL)
 4343   4372         # fill the beparams dict
 4344          -      utils.ForceDictType(self.beparams, constants.BES_PARAMETER_TYPES)
 4345          -      self.op.beparams = self.beparams
        4373  +      utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
 4346   4374
 4347   4375       # extra hvparams
 4348          -    self.hvparams = getattr(self.op, "hvparams", {})
 4349          -    if self.hvparams:
 4350          -      if not isinstance(self.hvparams, dict):
        4376  +    if self.op.hvparams:
        4377  +      if not isinstance(self.op.hvparams, dict):
 4351   4378           raise errors.OpPrereqError("Invalid hvparams passed: %s, expected"
 4352          -                                   " dict" % (type(self.hvparams), ),
        4379  +                                   " dict" % (type(self.op.hvparams), ),
 4353   4380                                      errors.ECODE_INVAL)
 4354   4381
 4355   4382         # check hypervisor parameter syntax (locally)
 4356   4383         cluster = self.cfg.GetClusterInfo()
 4357          -      utils.ForceDictType(self.hvparams, constants.HVS_PARAMETER_TYPES)
        4384  +      utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
 4358   4385         filled_hvp = cluster.FillHV(instance)
 4359          -      filled_hvp.update(self.hvparams)
        4386  +      filled_hvp.update(self.op.hvparams)
 4360   4387         hv_type = hypervisor.GetHypervisor(instance.hypervisor)
 4361   4388         hv_type.CheckParameterSyntax(filled_hvp)
 4362   4389         _CheckHVParams(self, instance.all_nodes, instance.hypervisor, filled_hvp)
 4363          -      self.op.hvparams = self.hvparams
 4364   4390
 4365   4391       _CheckNodeOnline(self, instance.primary_node)
 4366   4392
......
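Because _OP_DEFS lives on the class, a literal [] or {} used as a default would be one shared object handed to every opcode that omitted the attribute, and in-place updates (for example the ForceDictType call above, or a later assignment into self.op.beparams) would leak from one opcode to the next. The _EmptyList/_EmptyDict helpers are callables so the __init__ loop can create a fresh container each time. A small demonstration of the difference, again reusing the BaseLU/FakeOp sketch (the LU names are made up; the dict factory plays the same role as _EmptyDict):

  class LUSharedDefault(BaseLU):
    _OP_DEFS = [("beparams", {})]    # one dict object shared by every opcode

  class LUFreshDefault(BaseLU):
    _OP_DEFS = [("beparams", dict)]  # factory: a new dict per opcode

  op1, op2 = FakeOp(), FakeOp()
  LUSharedDefault(op1)
  LUSharedDefault(op2)
  op1.beparams["memory"] = 512
  print op2.beparams                 # {'memory': 512} -- leaked between opcodes

  op3, op4 = FakeOp(), FakeOp()
  LUFreshDefault(op3)
  LUFreshDefault(op4)
  op3.beparams["memory"] = 512
  print op4.beparams                 # {} -- each opcode got its own dict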
 4392   4418       _StartInstanceDisks(self, instance, force)
 4393   4419
 4394   4420       result = self.rpc.call_instance_start(node_current, instance,
 4395          -                                          self.hvparams, self.beparams)
        4421  +                                          self.op.hvparams, self.op.beparams)
 4396   4422       msg = result.fail_msg
 4397   4423       if msg:
 4398   4424         _ShutdownInstanceDisks(self, instance)
......
 4406   4432     HPATH = "instance-reboot"
 4407   4433     HTYPE = constants.HTYPE_INSTANCE
 4408   4434     _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]
        4435  +  _OP_DEFS = [("shutdown_timeout", constants.DEFAULT_SHUTDOWN_TIMEOUT)]
 4409   4436     REQ_BGL = False
 4410   4437
 4411          -  def CheckArguments(self):
 4412          -    """Check the arguments.
 4413          -
 4414          -    """
 4415          -    self.shutdown_timeout = getattr(self.op, "shutdown_timeout",
 4416          -                                    constants.DEFAULT_SHUTDOWN_TIMEOUT)
 4417          -
 4418   4438     def ExpandNames(self):
 4419   4439       if self.op.reboot_type not in [constants.INSTANCE_REBOOT_SOFT,
 4420   4440                                      constants.INSTANCE_REBOOT_HARD,
......
 4434   4454       env = {
 4435   4455         "IGNORE_SECONDARIES": self.op.ignore_secondaries,
 4436   4456         "REBOOT_TYPE": self.op.reboot_type,
 4437          -      "SHUTDOWN_TIMEOUT": self.shutdown_timeout,
        4457  +      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
 4438   4458         }
 4439   4459       env.update(_BuildInstanceHookEnvByObject(self, self.instance))
 4440   4460       nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
......
 4471   4491           self.cfg.SetDiskID(disk, node_current)
 4472   4492         result = self.rpc.call_instance_reboot(node_current, instance,
 4473   4493                                                reboot_type,
 4474          -                                             self.shutdown_timeout)
        4494  +                                             self.op.shutdown_timeout)
 4475   4495         result.Raise("Could not reboot instance")
 4476   4496       else:
 4477   4497         result = self.rpc.call_instance_shutdown(node_current, instance,
 4478          -                                               self.shutdown_timeout)
        4498  +                                               self.op.shutdown_timeout)
 4479   4499         result.Raise("Could not shutdown instance for full reboot")
 4480   4500         _ShutdownInstanceDisks(self, instance)
 4481   4501         _StartInstanceDisks(self, instance, ignore_secondaries)
......
 4496   4516     HPATH = "instance-stop"
 4497   4517     HTYPE = constants.HTYPE_INSTANCE
 4498   4518     _OP_REQP = ["instance_name"]
        4519  +  _OP_DEFS = [("timeout", constants.DEFAULT_SHUTDOWN_TIMEOUT)]
 4499   4520     REQ_BGL = False
 4500   4521
 4501          -  def CheckArguments(self):
 4502          -    """Check the arguments.
 4503          -
 4504          -    """
 4505          -    self.timeout = getattr(self.op, "timeout",
 4506          -                           constants.DEFAULT_SHUTDOWN_TIMEOUT)
 4507          -
 4508   4522     def ExpandNames(self):
 4509   4523       self._ExpandAndLockInstance()
 4510   4524
......
 4515   4529
 4516   4530       """
 4517   4531       env = _BuildInstanceHookEnvByObject(self, self.instance)
 4518          -    env["TIMEOUT"] = self.timeout
        4532  +    env["TIMEOUT"] = self.op.timeout
 4519   4533       nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
 4520   4534       return env, nl, nl
 4521   4535
......
 4536   4550       """
 4537   4551       instance = self.instance
 4538   4552       node_current = instance.primary_node
 4539          -    timeout = self.timeout
        4553  +    timeout = self.op.timeout
 4540   4554       self.cfg.MarkInstanceDown(instance.name)
 4541   4555       result = self.rpc.call_instance_shutdown(node_current, instance, timeout)
 4542   4556       msg = result.fail_msg
......
 4553   4567     HPATH = "instance-reinstall"
 4554   4568     HTYPE = constants.HTYPE_INSTANCE
 4555   4569     _OP_REQP = ["instance_name"]
        4570  +  _OP_DEFS = [
        4571  +    ("os_type", None),
        4572  +    ("force_variant", False),
        4573  +    ]
 4556   4574     REQ_BGL = False
 4557   4575
 4558   4576     def ExpandNames(self):
......
 4585   4603                                    errors.ECODE_INVAL)
 4586   4604       _CheckInstanceDown(self, instance, "cannot reinstall")
 4587   4605
 4588          -    self.op.os_type = getattr(self.op, "os_type", None)
 4589          -    self.op.force_variant = getattr(self.op, "force_variant", False)
 4590   4606       if self.op.os_type is not None:
 4591   4607         # OS verification
 4592   4608         pnode = _ExpandNodeName(self.cfg, instance.primary_node)
......
 4697   4713     HPATH = "instance-rename"
 4698   4714     HTYPE = constants.HTYPE_INSTANCE
 4699   4715     _OP_REQP = ["instance_name", "new_name"]
        4716  +  _OP_DEFS = [("ignore_ip", False)]
 4700   4717
 4701   4718     def BuildHooksEnv(self):
 4702   4719       """Build hooks env.
......
 4732   4749         raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
 4733   4750                                    new_name, errors.ECODE_EXISTS)
 4734   4751
 4735          -    if not getattr(self.op, "ignore_ip", False):
        4752  +    if not self.op.ignore_ip:
 4736   4753         if utils.TcpPing(name_info.ip, constants.DEFAULT_NODED_PORT):
 4737   4754           raise errors.OpPrereqError("IP %s of instance %s already in use" %
 4738   4755                                      (name_info.ip, new_name),
......
 4788   4805     HPATH = "instance-remove"
 4789   4806     HTYPE = constants.HTYPE_INSTANCE
 4790   4807     _OP_REQP = ["instance_name", "ignore_failures"]
        4808  +  _OP_DEFS = [("shutdown_timeout", constants.DEFAULT_SHUTDOWN_TIMEOUT)]
 4791   4809     REQ_BGL = False
 4792   4810
 4793          -  def CheckArguments(self):
 4794          -    """Check the arguments.
 4795          -
 4796          -    """
 4797          -    self.shutdown_timeout = getattr(self.op, "shutdown_timeout",
 4798          -                                    constants.DEFAULT_SHUTDOWN_TIMEOUT)
 4799          -
 4800   4811     def ExpandNames(self):
 4801   4812       self._ExpandAndLockInstance()
 4802   4813       self.needed_locks[locking.LEVEL_NODE] = []
......
 4813   4824
 4814   4825       """
 4815   4826       env = _BuildInstanceHookEnvByObject(self, self.instance)
 4816          -    env["SHUTDOWN_TIMEOUT"] = self.shutdown_timeout
        4827  +    env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
 4817   4828       nl = [self.cfg.GetMasterNode()]
 4818   4829       nl_post = list(self.instance.all_nodes) + nl
 4819   4830       return env, nl, nl_post
......
 4837   4848                    instance.name, instance.primary_node)
 4838   4849
 4839   4850       result = self.rpc.call_instance_shutdown(instance.primary_node, instance,
 4840          -                                             self.shutdown_timeout)
        4851  +                                             self.op.shutdown_timeout)
 4841   4852       msg = result.fail_msg
 4842   4853       if msg:
 4843   4854         if self.op.ignore_failures:
......
 5161   5172     HPATH = "instance-failover"
 5162   5173     HTYPE = constants.HTYPE_INSTANCE
 5163   5174     _OP_REQP = ["instance_name", "ignore_consistency"]
        5175  +  _OP_DEFS = [("shutdown_timeout", constants.DEFAULT_SHUTDOWN_TIMEOUT)]
 5164   5176     REQ_BGL = False
 5165   5177
 5166          -  def CheckArguments(self):
 5167          -    """Check the arguments.
 5168          -
 5169          -    """
 5170          -    self.shutdown_timeout = getattr(self.op, "shutdown_timeout",
 5171          -                                    constants.DEFAULT_SHUTDOWN_TIMEOUT)
 5172          -
 5173   5178     def ExpandNames(self):
 5174   5179       self._ExpandAndLockInstance()
 5175   5180       self.needed_locks[locking.LEVEL_NODE] = []
......
 5190   5195       target_node = instance.secondary_nodes[0]
 5191   5196       env = {
 5192   5197         "IGNORE_CONSISTENCY": self.op.ignore_consistency,
 5193          -      "SHUTDOWN_TIMEOUT": self.shutdown_timeout,
        5198  +      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
 5194   5199         "OLD_PRIMARY": source_node,
 5195   5200         "OLD_SECONDARY": target_node,
 5196   5201         "NEW_PRIMARY": target_node,
......
 5266   5271                    instance.name, source_node)
 5267   5272
 5268   5273       result = self.rpc.call_instance_shutdown(source_node, instance,
 5269          -                                             self.shutdown_timeout)
        5274  +                                             self.op.shutdown_timeout)
 5270   5275       msg = result.fail_msg
 5271   5276       if msg:
 5272   5277         if self.op.ignore_consistency:
......
 5294   5299                      instance.name, target_node)
 5295   5300
 5296   5301         disks_ok, _ = _AssembleInstanceDisks(self, instance,
 5297          -                                               ignore_secondaries=True)
        5302  +                                           ignore_secondaries=True)
 5298   5303         if not disks_ok:
 5299   5304           _ShutdownInstanceDisks(self, instance)
 5300   5305           raise errors.OpExecError("Can't activate the instance's disks")
......
 5366   5371     HPATH = "instance-move"
 5367   5372     HTYPE = constants.HTYPE_INSTANCE
 5368   5373     _OP_REQP = ["instance_name", "target_node"]
        5374  +  _OP_DEFS = [("shutdown_timeout", constants.DEFAULT_SHUTDOWN_TIMEOUT)]
 5369   5375     REQ_BGL = False
 5370   5376
 5371          -  def CheckArguments(self):
 5372          -    """Check the arguments.
 5373          -
 5374          -    """
 5375          -    self.shutdown_timeout = getattr(self.op, "shutdown_timeout",
 5376          -                                    constants.DEFAULT_SHUTDOWN_TIMEOUT)
 5377          -
 5378   5377     def ExpandNames(self):
 5379   5378       self._ExpandAndLockInstance()
 5380   5379       target_node = _ExpandNodeName(self.cfg, self.op.target_node)
......
 5394   5393       """
 5395   5394       env = {
 5396   5395         "TARGET_NODE": self.op.target_node,
 5397          -      "SHUTDOWN_TIMEOUT": self.shutdown_timeout,
        5396  +      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
 5398   5397         }
 5399   5398       env.update(_BuildInstanceHookEnvByObject(self, self.instance))
 5400   5399       nl = [self.cfg.GetMasterNode()] + [self.instance.primary_node,
......
 5460   5459                    instance.name, source_node)
 5461   5460
 5462   5461       result = self.rpc.call_instance_shutdown(source_node, instance,
 5463          -                                             self.shutdown_timeout)
        5462  +                                             self.op.shutdown_timeout)
 5464   5463       msg = result.fail_msg
 5465   5464       if msg:
 5466   5465         if self.op.ignore_consistency:
......
 6285   6284                 "mode", "start",
 6286   6285                 "wait_for_sync", "ip_check", "nics",
 6287   6286                 "hvparams", "beparams", "osparams"]
        6287  +  _OP_DEFS = [
        6288  +    ("name_check", True),
        6289  +    ("no_install", False),
        6290  +    ("os_type", None),
        6291  +    ("force_variant", False),
        6292  +    ("source_handshake", None),
        6293  +    ("source_x509_ca", None),
        6294  +    ("source_instance_name", None),
        6295  +    ("src_node", None),
        6296  +    ("src_path", None),
        6297  +    ("pnode", None),
        6298  +    ("snode", None),
        6299  +    ("iallocator", None),
        6300  +    ("hypervisor", None),
        6301  +    ("disk_template", None),
        6302  +    ("identify_defaults", None),
        6303  +    ]
 6288   6304     REQ_BGL = False
 6289   6305
 6290   6306     def CheckArguments(self):
 6291   6307       """Check arguments.
 6292   6308
 6293   6309       """
 6294          -    # set optional parameters to none if they don't exist
 6295          -    for attr in ["pnode", "snode", "iallocator", "hypervisor",
 6296          -                 "disk_template", "identify_defaults"]:
 6297          -      if not hasattr(self.op, attr):
 6298          -        setattr(self.op, attr, None)
 6299          -
 6300   6310       # do not require name_check to ease forward/backward compatibility
 6301   6311       # for tools
 6302          -    if not hasattr(self.op, "name_check"):
 6303          -      self.op.name_check = True
 6304          -    if not hasattr(self.op, "no_install"):
 6305          -      self.op.no_install = False
 6306   6312       if self.op.no_install and self.op.start:
 6307   6313         self.LogInfo("No-installation mode selected, disabling startup")
 6308   6314         self.op.start = False
......
 6387   6393           self.LogInfo("No-installation mode has no effect during import")
 6388   6394
 6389   6395       elif self.op.mode == constants.INSTANCE_CREATE:
 6390          -      if getattr(self.op, "os_type", None) is None:
        6396  +      if self.op.os_type is None:
 6391   6397           raise errors.OpPrereqError("No guest OS specified",
 6392   6398                                      errors.ECODE_INVAL)
 6393          -      self.op.force_variant = getattr(self.op, "force_variant", False)
 6394   6399         if self.op.disk_template is None:
 6395   6400           raise errors.OpPrereqError("No disk template specified",
 6396   6401                                      errors.ECODE_INVAL)
 6397   6402
 6398   6403       elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
 6399   6404         # Check handshake to ensure both clusters have the same domain secret
 6400          -      src_handshake = getattr(self.op, "source_handshake", None)
        6405  +      src_handshake = self.op.source_handshake
 6401   6406         if not src_handshake:
 6402   6407           raise errors.OpPrereqError("Missing source handshake",
 6403   6408                                      errors.ECODE_INVAL)
......
 6409   6414                                      errors.ECODE_INVAL)
 6410   6415
 6411   6416         # Load and check source CA
 6412          -      self.source_x509_ca_pem = getattr(self.op, "source_x509_ca", None)
        6417  +      self.source_x509_ca_pem = self.op.source_x509_ca
 6413   6418         if not self.source_x509_ca_pem:
 6414   6419           raise errors.OpPrereqError("Missing source X509 CA",
 6415   6420                                      errors.ECODE_INVAL)
......
 6428   6433
 6429   6434         self.source_x509_ca = cert
 6430   6435
 6431          -      src_instance_name = getattr(self.op, "source_instance_name", None)
        6436  +      src_instance_name = self.op.source_instance_name
 6432   6437         if not src_instance_name:
 6433   6438           raise errors.OpPrereqError("Missing source instance name",
 6434   6439                                      errors.ECODE_INVAL)
......
 6469   6474
 6470   6475       # in case of import lock the source node too
 6471   6476       if self.op.mode == constants.INSTANCE_IMPORT:
 6472          -      src_node = getattr(self.op, "src_node", None)
 6473          -      src_path = getattr(self.op, "src_path", None)
        6477  +      src_node = self.op.src_node
        6478  +      src_path = self.op.src_path
 6474   6479
 6475   6480         if src_path is None:
 6476   6481           self.op.src_path = src_path = self.op.instance_name
......
 7232   7237     HPATH = "mirrors-replace"
 7233   7238     HTYPE = constants.HTYPE_INSTANCE
 7234   7239     _OP_REQP = ["instance_name", "mode", "disks"]
        7240  +  _OP_DEFS = [
        7241  +    ("remote_node", None),
        7242  +    ("iallocator", None),
        7243  +    ("early_release", None),
        7244  +    ]
 7235   7245     REQ_BGL = False
 7236   7246
 7237   7247     def CheckArguments(self):
 7238          -    if not hasattr(self.op, "remote_node"):
 7239          -      self.op.remote_node = None
 7240          -    if not hasattr(self.op, "iallocator"):
 7241          -      self.op.iallocator = None
 7242          -    if not hasattr(self.op, "early_release"):
 7243          -      self.op.early_release = False
 7244          -
 7245   7248       TLReplaceDisks.CheckArguments(self.op.mode, self.op.remote_node,
 7246   7249                                     self.op.iallocator)
 7247   7250
......
 7308   7311     HPATH = "node-evacuate"
 7309   7312     HTYPE = constants.HTYPE_NODE
 7310   7313     _OP_REQP = ["node_name"]
        7314  +  _OP_DEFS = [
        7315  +    ("remote_node", None),
        7316  +    ("iallocator", None),
        7317  +    ("early_release", False),
        7318  +    ]
 7311   7319     REQ_BGL = False
 7312   7320
 7313   7321     def CheckArguments(self):
 7314          -    if not hasattr(self.op, "remote_node"):
 7315          -      self.op.remote_node = None
 7316          -    if not hasattr(self.op, "iallocator"):
 7317          -      self.op.iallocator = None
 7318          -    if not hasattr(self.op, "early_release"):
 7319          -      self.op.early_release = False
 7320          -
 7321   7322       TLReplaceDisks.CheckArguments(constants.REPLACE_DISK_CHG,
 7322   7323                                     self.op.remote_node,
 7323   7324                                     self.op.iallocator)
......
 8129   8130
 8130   8131     """
 8131   8132     _OP_REQP = ["nodes"]
        8133  +  _OP_DEFS = [
        8134  +    ("remote_node", None),
        8135  +    ("iallocator", None),
        8136  +    ]
 8132   8137     REQ_BGL = False
 8133   8138
 8134   8139     def CheckArguments(self):
 8135          -    if not hasattr(self.op, "remote_node"):
 8136          -      self.op.remote_node = None
 8137          -    if not hasattr(self.op, "iallocator"):
 8138          -      self.op.iallocator = None
 8139   8140       if self.op.remote_node is not None and self.op.iallocator is not None:
 8140   8141         raise errors.OpPrereqError("Give either the iallocator or the new"
 8141   8142                                    " secondary, not both", errors.ECODE_INVAL)
......
 8442   8443     HPATH = "instance-modify"
 8443   8444     HTYPE = constants.HTYPE_INSTANCE
 8444   8445     _OP_REQP = ["instance_name"]
        8446  +  _OP_DEFS = [
        8447  +    ("nics", _EmptyList),
        8448  +    ("disks", _EmptyList),
        8449  +    ("beparams", _EmptyDict),
        8450  +    ("hvparams", _EmptyDict),
        8451  +    ("disk_template", None),
        8452  +    ("remote_node", None),
        8453  +    ("os_name", None),
        8454  +    ("force_variant", False),
        8455  +    ("osparams", None),
        8456  +    ("force", False),
        8457  +    ]
 8445   8458     REQ_BGL = False
 8446   8459
 8447   8460     def CheckArguments(self):
 8448          -    if not hasattr(self.op, 'nics'):
 8449          -      self.op.nics = []
 8450          -    if not hasattr(self.op, 'disks'):
 8451          -      self.op.disks = []
 8452          -    if not hasattr(self.op, 'beparams'):
 8453          -      self.op.beparams = {}
 8454          -    if not hasattr(self.op, 'hvparams'):
 8455          -      self.op.hvparams = {}
 8456          -    if not hasattr(self.op, "disk_template"):
 8457          -      self.op.disk_template = None
 8458          -    if not hasattr(self.op, "remote_node"):
 8459          -      self.op.remote_node = None
 8460          -    if not hasattr(self.op, "os_name"):
 8461          -      self.op.os_name = None
 8462          -    if not hasattr(self.op, "force_variant"):
 8463          -      self.op.force_variant = False
 8464          -    if not hasattr(self.op, "osparams"):
 8465          -      self.op.osparams = None
 8466          -    self.op.force = getattr(self.op, "force", False)
 8467   8461       if not (self.op.nics or self.op.disks or self.op.disk_template or
 8468   8462               self.op.hvparams or self.op.beparams or self.op.os_name):
 8469   8463         raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
......
 8650   8644       This only checks the instance list against the existing names.
 8651   8645
 8652   8646       """
 8653          -    self.force = self.op.force
 8654          -
 8655   8647       # checking the new params on the primary/secondary nodes
 8656   8648
 8657   8649       instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
......
 8725   8717
 8726   8718       self.warn = []
 8727   8719
 8728          -    if constants.BE_MEMORY in self.op.beparams and not self.force:
        8720  +    if constants.BE_MEMORY in self.op.beparams and not self.op.force:
 8729   8721         mem_check_list = [pnode]
 8730   8722         if be_new[constants.BE_AUTO_BALANCE]:
 8731   8723           # either we changed auto_balance to yes or it was from before
......
 8824   8816           msg = self.rpc.call_bridges_exist(pnode, [nic_bridge]).fail_msg
 8825   8817           if msg:
 8826   8818             msg = "Error checking bridges on node %s: %s" % (pnode, msg)
 8827          -          if self.force:
        8819  +          if self.op.force:
 8828   8820               self.warn.append(msg)
 8829   8821             else:
 8830   8822               raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
......
 9217   9209     HPATH = "instance-export"
 9218   9210     HTYPE = constants.HTYPE_INSTANCE
 9219   9211     _OP_REQP = ["instance_name", "target_node", "shutdown"]
        9212  +  _OP_DEFS = [
        9213  +    ("shutdown_timeout", constants.DEFAULT_SHUTDOWN_TIMEOUT),
        9214  +    ("remove_instance", False),
        9215  +    ("ignore_remove_failures", False),
        9216  +    ("mode", constants.EXPORT_MODE_LOCAL),
        9217  +    ("x509_key_name", None),
        9218  +    ("destination_x509_ca", None),
        9219  +    ]
 9220   9220     REQ_BGL = False
 9221   9221
 9222   9222     def CheckArguments(self):
 9223   9223       """Check the arguments.
 9224   9224
 9225   9225       """
 9226          -    _CheckBooleanOpField(self.op, "remove_instance")
 9227          -    _CheckBooleanOpField(self.op, "ignore_remove_failures")
 9228          -
 9229          -    self.shutdown_timeout = getattr(self.op, "shutdown_timeout",
 9230          -                                    constants.DEFAULT_SHUTDOWN_TIMEOUT)
 9231          -    self.remove_instance = getattr(self.op, "remove_instance", False)
 9232          -    self.ignore_remove_failures = getattr(self.op, "ignore_remove_failures",
 9233          -                                          False)
 9234          -    self.export_mode = getattr(self.op, "mode", constants.EXPORT_MODE_LOCAL)
 9235          -    self.x509_key_name = getattr(self.op, "x509_key_name", None)
 9236          -    self.dest_x509_ca_pem = getattr(self.op, "destination_x509_ca", None)
        9226  +    self.x509_key_name = self.op.x509_key_name
        9227  +    self.dest_x509_ca_pem = self.op.destination_x509_ca
 9237   9228
 9238          -    if self.remove_instance and not self.op.shutdown:
        9229  +    if self.op.remove_instance and not self.op.shutdown:
 9239   9230         raise errors.OpPrereqError("Can not remove instance without shutting it"
 9240   9231                                    " down before")
 9241   9232
 9242          -    if self.export_mode not in constants.EXPORT_MODES:
 9243          -      raise errors.OpPrereqError("Invalid export mode %r" % self.export_mode,
        9233  +    if self.op.mode not in constants.EXPORT_MODES:
        9234  +      raise errors.OpPrereqError("Invalid export mode %r" % self.op.mode,
 9244   9235                                    errors.ECODE_INVAL)
 9245   9236
 9246          -    if self.export_mode == constants.EXPORT_MODE_REMOTE:
        9237  +    if self.op.mode == constants.EXPORT_MODE_REMOTE:
 9247   9238         if not self.x509_key_name:
 9248   9239           raise errors.OpPrereqError("Missing X509 key name for encryption",
 9249   9240                                      errors.ECODE_INVAL)
......
 9256   9247       self._ExpandAndLockInstance()
 9257   9248
 9258   9249       # Lock all nodes for local exports
 9259          -    if self.export_mode == constants.EXPORT_MODE_LOCAL:
        9250  +    if self.op.mode == constants.EXPORT_MODE_LOCAL:
 9260   9251         # FIXME: lock only instance primary and destination node
 9261   9252         #
 9262   9253         # Sad but true, for now we have do lock all nodes, as we don't know where
 9263   9254         # the previous export might be, and in this LU we search for it and
 9264   9255         # remove it from its current node. In the future we could fix this by:
 9265          -      #  - making a tasklet to search (share-lock all), then create the new one,
 9266          -      #    then one to remove, after
        9256  +      #  - making a tasklet to search (share-lock all), then create the
        9257  +      #    new one, then one to remove, after
 9267   9258         #  - removing the removal operation altogether
 9268   9259         self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
 9269   9260
......
 9278   9269
 9279   9270       """
 9280   9271       env = {
 9281          -      "EXPORT_MODE": self.export_mode,
        9272  +      "EXPORT_MODE": self.op.mode,
 9282   9273         "EXPORT_NODE": self.op.target_node,
 9283   9274         "EXPORT_DO_SHUTDOWN": self.op.shutdown,
 9284          -      "SHUTDOWN_TIMEOUT": self.shutdown_timeout,
        9275  +      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
 9285   9276         # TODO: Generic function for boolean env variables
 9286          -      "REMOVE_INSTANCE": str(bool(self.remove_instance)),
        9277  +      "REMOVE_INSTANCE": str(bool(self.op.remove_instance)),
 9287   9278         }
 9288   9279
 9289   9280       env.update(_BuildInstanceHookEnvByObject(self, self.instance))
 9290   9281
 9291   9282       nl = [self.cfg.GetMasterNode(), self.instance.primary_node]
 9292   9283
 9293          -    if self.export_mode == constants.EXPORT_MODE_LOCAL:
        9284  +    if self.op.mode == constants.EXPORT_MODE_LOCAL:
 9294   9285         nl.append(self.op.target_node)
 9295   9286
 9296   9287       return env, nl, nl
......
 9308   9299             "Cannot retrieve locked instance %s" % self.op.instance_name
 9309   9300       _CheckNodeOnline(self, self.instance.primary_node)
 9310   9301
 9311          -    if self.export_mode == constants.EXPORT_MODE_LOCAL:
        9302  +    if self.op.mode == constants.EXPORT_MODE_LOCAL:
 9312   9303         self.op.target_node = _ExpandNodeName(self.cfg, self.op.target_node)
 9313   9304         self.dst_node = self.cfg.GetNodeInfo(self.op.target_node)
 9314   9305         assert self.dst_node is not None
......
 9320   9311         self.dest_disk_info = None
 9321   9312         self.dest_x509_ca = None
 9322   9313
 9323          -    elif self.export_mode == constants.EXPORT_MODE_REMOTE:
        9314  +    elif self.op.mode == constants.EXPORT_MODE_REMOTE:
 9324   9315         self.dst_node = None
 9325   9316
 9326   9317         if len(self.op.target_node) != len(self.instance.disks):
......
 9351   9342
 9352   9343         (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
 9353   9344         if errcode is not None:
 9354          -        raise errors.OpPrereqError("Invalid destination X509 CA (%s)" % (msg, ),
 9355          -                                   errors.ECODE_INVAL)
        9345  +        raise errors.OpPrereqError("Invalid destination X509 CA (%s)" %
        9346  +                                   (msg, ), errors.ECODE_INVAL)
 9356   9347
 9357   9348         self.dest_x509_ca = cert
 9358   9349
......
 9363   9354           (host, port, magic) = \
 9364   9355             masterd.instance.CheckRemoteExportDiskInfo(cds, idx, disk_data)
 9365   9356         except errors.GenericError, err:
 9366          -          raise errors.OpPrereqError("Target info for disk %s: %s" % (idx, err),
 9367          -                                     errors.ECODE_INVAL)
        9357  +          raise errors.OpPrereqError("Target info for disk %s: %s" %
        9358  +                                     (idx, err), errors.ECODE_INVAL)
 9368   9359
 9369   9360           disk_info.append((host, port, magic))
 9370   9361
......
 9373   9364
 9374   9365       else:
 9375   9366         raise errors.ProgrammerError("Unhandled export mode %r" %
 9376          -                                   self.export_mode)
        9367  +                                   self.op.mode)
 9377   9368
 9378   9369       # instance disk type verification
 9379   9370       # TODO: Implement export support for file-based disks
......
 9389   9380       exports will be removed from the nodes A, B and D.
 9390   9381
 9391   9382       """
 9392          -    assert self.export_mode != constants.EXPORT_MODE_REMOTE
        9383  +    assert self.op.mode != constants.EXPORT_MODE_REMOTE
 9393   9384
 9394   9385       nodelist = self.cfg.GetNodeList()
 9395   9386       nodelist.remove(self.dst_node.name)
......
 9414   9405       """Export an instance to an image in the cluster.
 9415   9406
 9416   9407       """
 9417          -    assert self.export_mode in constants.EXPORT_MODES
        9408  +    assert self.op.mode in constants.EXPORT_MODES
 9418   9409
 9419   9410       instance = self.instance
 9420   9411       src_node = instance.primary_node
......
 9423   9414         # shutdown the instance, but not the disks
 9424   9415         feedback_fn("Shutting down instance %s" % instance.name)
 9425   9416         result = self.rpc.call_instance_shutdown(src_node, instance,
 9426          -                                               self.shutdown_timeout)
        9417  +                                               self.op.shutdown_timeout)
 9427   9418         # TODO: Maybe ignore failures if ignore_remove_failures is set
 9428   9419         result.Raise("Could not shutdown instance %s on"
 9429   9420                      " node %s" % (instance.name, src_node))
......
 9447   9438         helper.CreateSnapshots()
 9448   9439         try:
 9449   9440           if (self.op.shutdown and instance.admin_up and
 9450          -            not self.remove_instance):
        9441  +            not self.op.remove_instance):
 9451   9442             assert not activate_disks
 9452   9443             feedback_fn("Starting instance %s" % instance.name)
 9453   9444             result = self.rpc.call_instance_start(src_node, instance, None, None)
......
 9457   9448               _ShutdownInstanceDisks(self, instance)
 9458   9449               raise errors.OpExecError("Could not start instance: %s" % msg)
 9459   9450
 9460          -        if self.export_mode == constants.EXPORT_MODE_LOCAL:
        9451  +        if self.op.mode == constants.EXPORT_MODE_LOCAL:
 9461   9452             (fin_resu, dresults) = helper.LocalExport(self.dst_node)
 9462          -        elif self.export_mode == constants.EXPORT_MODE_REMOTE:
        9453  +        elif self.op.mode == constants.EXPORT_MODE_REMOTE:
 9463   9454             connect_timeout = constants.RIE_CONNECT_TIMEOUT
 9464   9455             timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
 9465   9456
......
 9486   9477           _ShutdownInstanceDisks(self, instance)
 9487   9478
 9488   9479       # Remove instance if requested
 9489          -    if self.remove_instance:
        9480  +    if self.op.remove_instance:
 9490   9481         if not (compat.all(dresults) and fin_resu):
 9491   9482           feedback_fn("Not removing instance %s as parts of the export failed" %
 9492   9483                       instance.name)
 9493   9484         else:
 9494   9485           feedback_fn("Removing instance %s" % instance.name)
 9495   9486           _RemoveInstance(self, feedback_fn, instance,
 9496          -                        self.ignore_remove_failures)
        9487  +                        self.op.ignore_remove_failures)
 9497   9488
 9498          -    if self.export_mode == constants.EXPORT_MODE_LOCAL:
        9489  +    if self.op.mode == constants.EXPORT_MODE_LOCAL:
 9499   9490         self._CleanupExports(feedback_fn)
 9500   9491
 9501   9492       return fin_resu, dresults
......
10095  10086
10096  10087     """
10097  10088     _OP_REQP = ["direction", "mode", "name"]
       10089  +  _OP_DEFS = [
       10090  +    ("hypervisor", None),
       10091  +    ("allocator", None),
       10092  +    ]
10098  10093
10099  10094     def CheckPrereq(self):
10100  10095       """Check prerequisites.
......
10133  10128               row["mode"] not in ['r', 'w']):
10134  10129             raise errors.OpPrereqError("Invalid contents of the 'disks'"
10135  10130                                        " parameter", errors.ECODE_INVAL)
10136         -      if not hasattr(self.op, "hypervisor") or self.op.hypervisor is None:
       10131  +      if self.op.hypervisor is None:
10137  10132           self.op.hypervisor = self.cfg.GetHypervisorType()
10138  10133         elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
10139  10134         if not hasattr(self.op, "name"):
......
10151  10146                                    self.op.mode, errors.ECODE_INVAL)
10152  10147
10153  10148       if self.op.direction == constants.IALLOCATOR_DIR_OUT:
10154         -      if not hasattr(self.op, "allocator") or self.op.allocator is None:
       10149  +      if self.op.allocator is None:
10155  10150           raise errors.OpPrereqError("Missing allocator name",
10156  10151                                      errors.ECODE_INVAL)
10157  10152       elif self.op.direction != constants.IALLOCATOR_DIR_IN:
