#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Logical units dealing with instances."""

import OpenSSL
import copy
import logging
import os

from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import ht
from ganeti import hypervisor
from ganeti import locking
from ganeti.masterd import iallocator
from ganeti import masterd
from ganeti import netutils
from ganeti import objects
from ganeti import pathutils
from ganeti import serializer
import ganeti.rpc.node as rpc
from ganeti import utils

from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs

from ganeti.cmdlib.common import INSTANCE_DOWN, \
  INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, CheckNodeOnline, \
  ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
  LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
  IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
  AnnotateDiskParams, GetUpdatedParams, ExpandInstanceUuidAndName, \
  ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeUuidAndName, \
  CheckDiskTemplateEnabled, IsValidDiskAccessModeCombination
from ganeti.cmdlib.instance_storage import CreateDisks, \
  CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, WaitForSync, \
  IsExclusiveStorageEnabledNodeUuid, CreateSingleBlockDev, ComputeDisks, \
  CheckRADOSFreeSpace, ComputeDiskSizePerVG, GenerateDiskTemplate, \
  StartInstanceDisks, ShutdownInstanceDisks, AssembleInstanceDisks, \
  CheckSpindlesExclusiveStorage
from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
  GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
  NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
  ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
  GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
  CheckInstanceBridgesExist, CheckNicsBridgesExist, CheckNodeHasOS

import ganeti.masterd.instance


#: Type description for changes as returned by L{_ApplyContainerMods}'s
#: callbacks
_TApplyContModsCbChanges = \
  ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
    ht.TNonEmptyString,
    ht.TAny,
    ])))
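# Illustrative only (assumption, not part of the upstream module): a value
# matching the type above is either None or a list of two-element
# (name, new-value) pairs, e.g.
#   [("mode", "bridged"), ("link", None)]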


def _CheckHostnameSane(lu, name):
  """Ensures that a given hostname resolves to a 'sane' name.

  The given name is required to be a prefix of the resolved hostname,
  to prevent accidental mismatches.

  @param lu: the logical unit on behalf of which we're checking
  @param name: the name we should resolve and check
  @return: the resolved hostname object

  """
  hostname = netutils.GetHostname(name=name)
  if hostname.name != name:
    lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name)
  if not utils.MatchNameComponent(name, [hostname.name]):
    raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
                                " same as given hostname '%s'") %
                               (hostname.name, name), errors.ECODE_INVAL)
  return hostname
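# Illustrative example (assumption, not from the upstream source): a request
# for "inst1" that resolves to "inst1.example.com" passes the check above,
# while a resolution to "web.example.com" raises OpPrereqError, because the
# given name must be a prefix of the resolved hostname.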


def _CheckOpportunisticLocking(op):
  """Generate error if opportunistic locking is not possible.

  """
  if op.opportunistic_locking and not op.iallocator:
    raise errors.OpPrereqError("Opportunistic locking is only available in"
                               " combination with an instance allocator",
                               errors.ECODE_INVAL)


def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_name_whitelist):
  """Wrapper around IAReqInstanceAlloc.

  @param op: The instance opcode
  @param disks: The computed disks
  @param nics: The computed nics
  @param beparams: The fully filled beparams
  @param node_name_whitelist: List of nodes which should appear as online to the
    allocator (unless the node is already marked offline)

  @returns: A filled L{iallocator.IAReqInstanceAlloc}

  """
  spindle_use = beparams[constants.BE_SPINDLE_USE]
  return iallocator.IAReqInstanceAlloc(name=op.instance_name,
                                       disk_template=op.disk_template,
                                       tags=op.tags,
                                       os=op.os_type,
                                       vcpus=beparams[constants.BE_VCPUS],
                                       memory=beparams[constants.BE_MAXMEM],
                                       spindle_use=spindle_use,
                                       disks=disks,
                                       nics=[n.ToDict() for n in nics],
                                       hypervisor=op.hypervisor,
                                       node_whitelist=node_name_whitelist)


def _ComputeFullBeParams(op, cluster):
  """Computes the full beparams.

  @param op: The instance opcode
  @param cluster: The cluster config object

  @return: The fully filled beparams

  """
  default_beparams = cluster.beparams[constants.PP_DEFAULT]
  for param, value in op.beparams.iteritems():
    if value == constants.VALUE_AUTO:
      op.beparams[param] = default_beparams[param]
  objects.UpgradeBeParams(op.beparams)
  utils.ForceDictType(op.beparams, constants.BES_PARAMETER_TYPES)
  return cluster.SimpleFillBE(op.beparams)
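# Illustrative sketch (assumption, not from the upstream source): with cluster
# defaults of {"vcpus": 1, "maxmem": 512} and op.beparams of
# {"vcpus": "auto", "maxmem": 2048}, the loop above replaces "auto" with the
# cluster default and SimpleFillBE returns the merged, fully typed dict.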


def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
  """Computes the nics.

  @param op: The instance opcode
  @param cluster: Cluster configuration object
  @param default_ip: The default ip to assign
  @param cfg: An instance of the configuration object
  @param ec_id: Execution context ID

  @returns: The built up nics

  """
  nics = []
  for nic in op.nics:
    nic_mode_req = nic.get(constants.INIC_MODE, None)
    nic_mode = nic_mode_req
    if nic_mode is None or nic_mode == constants.VALUE_AUTO:
      nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]

    net = nic.get(constants.INIC_NETWORK, None)
    link = nic.get(constants.NIC_LINK, None)
    ip = nic.get(constants.INIC_IP, None)
    vlan = nic.get(constants.INIC_VLAN, None)

    if net is None or net.lower() == constants.VALUE_NONE:
      net = None
    else:
      if nic_mode_req is not None or link is not None:
        raise errors.OpPrereqError("If network is given, no mode or link"
                                   " is allowed to be passed",
                                   errors.ECODE_INVAL)

    # ip validity checks
    if ip is None or ip.lower() == constants.VALUE_NONE:
      nic_ip = None
    elif ip.lower() == constants.VALUE_AUTO:
      if not op.name_check:
        raise errors.OpPrereqError("IP address set to auto but name checks"
                                   " have been skipped",
                                   errors.ECODE_INVAL)
      nic_ip = default_ip
    else:
      # We defer pool operations until later, so that the iallocator has
      # filled in the instance's node(s)
      if ip.lower() == constants.NIC_IP_POOL:
        if net is None:
          raise errors.OpPrereqError("if ip=pool, parameter network"
                                     " must be passed too",
                                     errors.ECODE_INVAL)

      elif not netutils.IPAddress.IsValid(ip):
        raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                   errors.ECODE_INVAL)

      nic_ip = ip

    # TODO: check the ip address for uniqueness
    if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
      raise errors.OpPrereqError("Routed nic mode requires an ip address",
                                 errors.ECODE_INVAL)

    # MAC address verification
    mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
    if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
      mac = utils.NormalizeAndValidateMac(mac)

      try:
        # TODO: We need to factor this out
        cfg.ReserveMAC(mac, ec_id)
      except errors.ReservationError:
        raise errors.OpPrereqError("MAC address %s already in use"
                                   " in cluster" % mac,
                                   errors.ECODE_NOTUNIQUE)

    # Build nic parameters
    nicparams = {}
    if nic_mode_req:
      nicparams[constants.NIC_MODE] = nic_mode
    if link:
      nicparams[constants.NIC_LINK] = link
    if vlan:
      nicparams[constants.NIC_VLAN] = vlan

    check_params = cluster.SimpleFillNIC(nicparams)
    objects.NIC.CheckParameterSyntax(check_params)
    net_uuid = cfg.LookupNetwork(net)
    name = nic.get(constants.INIC_NAME, None)
    if name is not None and name.lower() == constants.VALUE_NONE:
      name = None
    nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name,
                          network=net_uuid, nicparams=nicparams)
    nic_obj.uuid = cfg.GenerateUniqueID(ec_id)
    nics.append(nic_obj)

  return nics
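# Illustrative examples (assumption, not from the upstream source) of opcode
# NIC specifications handled above:
#   {constants.INIC_MODE: constants.NIC_MODE_BRIDGED,
#    constants.INIC_LINK: "br0", constants.INIC_MAC: constants.VALUE_AUTO}
# or, for an address taken from a network's IP pool,
#   {constants.INIC_NETWORK: "mynet", constants.INIC_IP: constants.NIC_IP_POOL}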


def _CheckForConflictingIp(lu, ip, node_uuid):
  """Raise an error in case of a conflicting IP address.

  @type ip: string
  @param ip: IP address
  @type node_uuid: string
  @param node_uuid: node UUID

  """
  (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node_uuid)
  if conf_net is not None:
    raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
                                " network %s, but the target NIC does not." %
                                (ip, conf_net)),
                               errors.ECODE_STATE)

  return (None, None)


def _ComputeIPolicyInstanceSpecViolation(
  ipolicy, instance_spec, disk_template,
  _compute_fn=ComputeIPolicySpecViolation):
  """Compute if instance spec meets the specs of ipolicy.

  @type ipolicy: dict
  @param ipolicy: The ipolicy to verify against
  @type instance_spec: dict
  @param instance_spec: The instance spec to verify
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{ComputeIPolicySpecViolation}

  """
  mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
  cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
  disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
  disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
  nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
  spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)

  return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
                     disk_sizes, spindle_use, disk_template)
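# Illustrative example (assumption, not from the upstream source): an
# instance_spec for the helper above is a flat dict such as
#   {constants.ISPEC_MEM_SIZE: 2048, constants.ISPEC_CPU_COUNT: 2,
#    constants.ISPEC_DISK_COUNT: 1, constants.ISPEC_DISK_SIZE: [10240],
#    constants.ISPEC_NIC_COUNT: 1, constants.ISPEC_SPINDLE_USE: 1}
# and the result is the list of violated policy limits (empty if compliant).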


def _CheckOSVariant(os_obj, name):
  """Check whether an OS name conforms to the os variants specification.

  @type os_obj: L{objects.OS}
  @param os_obj: OS object to check
  @type name: string
  @param name: OS name passed by the user, to check for validity

  """
  variant = objects.OS.GetVariant(name)
  if not os_obj.supported_variants:
    if variant:
      raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
                                 " passed)" % (os_obj.name, variant),
                                 errors.ECODE_INVAL)
    return
  if not variant:
    raise errors.OpPrereqError("OS name must include a variant",
                               errors.ECODE_INVAL)

  if variant not in os_obj.supported_variants:
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)


class LUInstanceCreate(LogicalUnit):
  """Create an instance.

  """
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def _CheckDiskTemplateValid(self):
    """Checks validity of disk template.

    """
    cluster = self.cfg.GetClusterInfo()
    if self.op.disk_template is None:
      # FIXME: It would be better to take the default disk template from the
      # ipolicy, but for the ipolicy we need the primary node, which we get from
      # the iallocator, which wants the disk template as input. To solve this
      # chicken-and-egg problem, it should be possible to specify just a node
      # group from the iallocator and take the ipolicy from that.
      self.op.disk_template = cluster.enabled_disk_templates[0]
    CheckDiskTemplateEnabled(cluster, self.op.disk_template)

  def _CheckDiskArguments(self):
    """Checks validity of disk-related arguments.

    """
    # check that the disks' names are unique and valid
    utils.ValidateDeviceNames("disk", self.op.disks)

    self._CheckDiskTemplateValid()

    # check disks' parameter names and consistent adopt/no-adopt strategy
    has_adopt = has_no_adopt = False
    for disk in self.op.disks:
      if self.op.disk_template != constants.DT_EXT:
        utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
      if constants.IDISK_ADOPT in disk:
        has_adopt = True
      else:
        has_no_adopt = True
    if has_adopt and has_no_adopt:
      raise errors.OpPrereqError("Either all disks are adopted or none is",
                                 errors.ECODE_INVAL)
    if has_adopt:
      if self.op.disk_template not in constants.DTS_MAY_ADOPT:
        raise errors.OpPrereqError("Disk adoption is not supported for the"
                                   " '%s' disk template" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)
      if self.op.iallocator is not None:
        raise errors.OpPrereqError("Disk adoption not allowed with an"
                                   " iallocator script", errors.ECODE_INVAL)
      if self.op.mode == constants.INSTANCE_IMPORT:
        raise errors.OpPrereqError("Disk adoption not allowed for"
                                   " instance import", errors.ECODE_INVAL)
    else:
      if self.op.disk_template in constants.DTS_MUST_ADOPT:
        raise errors.OpPrereqError("Disk template %s requires disk adoption,"
                                   " but no 'adopt' parameter given" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)

    self.adopt_disks = has_adopt

  def _CheckVLANArguments(self):
    """Check validity of VLANs, if given.

    """
    for nic in self.op.nics:
      vlan = nic.get(constants.INIC_VLAN, None)
      if vlan:
        if vlan[0] == ".":
          # vlan starting with dot means single untagged vlan,
          # might be followed by trunk (:)
          if not vlan[1:].isdigit():
            vlanlist = vlan[1:].split(':')
            for vl in vlanlist:
              if not vl.isdigit():
                raise errors.OpPrereqError("Specified VLAN parameter is "
                                           "invalid : %s" % vlan,
                                           errors.ECODE_INVAL)
        elif vlan[0] == ":":
          # Trunk - tagged only
          vlanlist = vlan[1:].split(':')
          for vl in vlanlist:
            if not vl.isdigit():
              raise errors.OpPrereqError("Specified VLAN parameter is invalid"
                                         " : %s" % vlan, errors.ECODE_INVAL)
        elif vlan.isdigit():
          # This is the simplest case. No dots, only digits
          # -> Create untagged access port, dot needs to be added
          nic[constants.INIC_VLAN] = "." + vlan
        else:
          raise errors.OpPrereqError("Specified VLAN parameter is invalid"
                                     " : %s" % vlan, errors.ECODE_INVAL)

  def CheckArguments(self):
    """Check arguments.

    """
    # do not require name_check to ease forward/backward compatibility
    # for tools
    if self.op.no_install and self.op.start:
      self.LogInfo("No-installation mode selected, disabling startup")
      self.op.start = False
    # validate/normalize the instance name
    self.op.instance_name = \
      netutils.Hostname.GetNormalizedName(self.op.instance_name)

    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("Cannot do IP address check without a name"
                                 " check", errors.ECODE_INVAL)

    # add nic for instance communication
    if self.op.instance_communication:
      nic_name = "%s%s" % (constants.INSTANCE_COMMUNICATION_NIC_PREFIX,
                           self.op.instance_name)
      communication_network = constants.INSTANCE_COMMUNICATION_NETWORK

      self.op.nics.append({constants.INIC_NAME: nic_name,
                           constants.INIC_MAC: constants.VALUE_GENERATE,
                           constants.INIC_IP: constants.NIC_IP_POOL,
                           constants.INIC_NETWORK: communication_network})

    # check nics' parameter names
    for nic in self.op.nics:
      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
    # check that the NICs' names are unique and valid
    utils.ValidateDeviceNames("NIC", self.op.nics)

    self._CheckVLANArguments()

    self._CheckDiskArguments()
    assert self.op.disk_template is not None

    # instance name verification
    if self.op.name_check:
      self.hostname = _CheckHostnameSane(self, self.op.instance_name)
      self.op.instance_name = self.hostname.name
      # used in CheckPrereq for ip ping check
      self.check_ip = self.hostname.ip
    else:
      self.check_ip = None

    # file storage checks
    if (self.op.file_driver and
        not self.op.file_driver in constants.FILE_DRIVER):
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                 self.op.file_driver, errors.ECODE_INVAL)

    # set default file_driver if unset and required
    if (not self.op.file_driver and
        self.op.disk_template in constants.DTS_FILEBASED):
      self.op.file_driver = constants.FD_LOOP

    ### Node/iallocator related checks
    CheckIAllocatorOrNode(self, "iallocator", "pnode")

    if self.op.pnode is not None:
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        if self.op.snode is None:
          raise errors.OpPrereqError("The networked disk templates need"
                                     " a mirror node", errors.ECODE_INVAL)
      elif self.op.snode:
        self.LogWarning("Secondary node will be ignored on non-mirrored disk"
                        " template")
        self.op.snode = None

    _CheckOpportunisticLocking(self.op)

    if self.op.mode == constants.INSTANCE_IMPORT:
      # On import force_variant must be True, because if we forced it at
      # initial install, our only chance when importing it back is that it
      # works again!
      self.op.force_variant = True

      if self.op.no_install:
        self.LogInfo("No-installation mode has no effect during import")

    elif self.op.mode == constants.INSTANCE_CREATE:
      if self.op.os_type is None:
        raise errors.OpPrereqError("No guest OS specified",
                                   errors.ECODE_INVAL)
      if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
        raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
                                   " installation" % self.op.os_type,
                                   errors.ECODE_STATE)
    elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
      self._cds = GetClusterDomainSecret()

      # Check handshake to ensure both clusters have the same domain secret
      src_handshake = self.op.source_handshake
      if not src_handshake:
        raise errors.OpPrereqError("Missing source handshake",
                                   errors.ECODE_INVAL)

      errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
                                                           src_handshake)
      if errmsg:
        raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
                                   errors.ECODE_INVAL)

      # Load and check source CA
      self.source_x509_ca_pem = self.op.source_x509_ca
      if not self.source_x509_ca_pem:
        raise errors.OpPrereqError("Missing source X509 CA",
                                   errors.ECODE_INVAL)

      try:
        (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
                                                    self._cds)
      except OpenSSL.crypto.Error, err:
        raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
                                   (err, ), errors.ECODE_INVAL)

      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
      if errcode is not None:
        raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
                                   errors.ECODE_INVAL)

      self.source_x509_ca = cert

      src_instance_name = self.op.source_instance_name
      if not src_instance_name:
        raise errors.OpPrereqError("Missing source instance name",
                                   errors.ECODE_INVAL)

      self.source_instance_name = \
        netutils.GetHostname(name=src_instance_name).name

    else:
      raise errors.OpPrereqError("Invalid instance creation mode %r" %
                                 self.op.mode, errors.ECODE_INVAL)

  def ExpandNames(self):
    """ExpandNames for CreateInstance.

    Figure out the right locks for instance creation.

    """
    self.needed_locks = {}

    # this is just a preventive check, but someone might still add this
    # instance in the meantime, and creation will fail at lock-add time
    if self.op.instance_name in \
      [inst.name for inst in self.cfg.GetAllInstancesInfo().values()]:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 self.op.instance_name, errors.ECODE_EXISTS)

    self.add_locks[locking.LEVEL_INSTANCE] = self.op.instance_name

    if self.op.iallocator:
      # TODO: Find a solution to not lock all nodes in the cluster, e.g. by
      # specifying a group on instance creation and then selecting nodes from
      # that group
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
      self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET

      if self.op.opportunistic_locking:
        self.opportunistic_locks[locking.LEVEL_NODE] = True
    else:
      (self.op.pnode_uuid, self.op.pnode) = \
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
      nodelist = [self.op.pnode_uuid]
      if self.op.snode is not None:
        (self.op.snode_uuid, self.op.snode) = \
          ExpandNodeUuidAndName(self.cfg, self.op.snode_uuid, self.op.snode)
        nodelist.append(self.op.snode_uuid)
      self.needed_locks[locking.LEVEL_NODE] = nodelist

    # in case of import lock the source node too
    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = self.op.src_node
      src_path = self.op.src_path

      if src_path is None:
        self.op.src_path = src_path = self.op.instance_name

      if src_node is None:
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
        self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
        self.op.src_node = None
        if os.path.isabs(src_path):
          raise errors.OpPrereqError("Importing an instance from a path"
                                     " requires a source node option",
                                     errors.ECODE_INVAL)
      else:
        (self.op.src_node_uuid, self.op.src_node) = (_, src_node) = \
          ExpandNodeUuidAndName(self.cfg, self.op.src_node_uuid, src_node)
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
          self.needed_locks[locking.LEVEL_NODE].append(self.op.src_node_uuid)
        if not os.path.isabs(src_path):
          self.op.src_path = \
            utils.PathJoin(pathutils.EXPORT_DIR, src_path)

    self.needed_locks[locking.LEVEL_NODE_RES] = \
      CopyLockList(self.needed_locks[locking.LEVEL_NODE])

    # Optimistically acquire shared group locks (we're reading the
    # configuration).  We can't just call GetInstanceNodeGroups, because the
    # instance doesn't exist yet. Therefore we lock all node groups of all
    # nodes we have.
    if self.needed_locks[locking.LEVEL_NODE] == locking.ALL_SET:
      # In the case we lock all nodes for opportunistic allocation, we have no
      # choice but to lock all groups, because they're allocated before nodes.
      # This is sad, but true. At least we release all those we don't need in
      # CheckPrereq later.
      self.needed_locks[locking.LEVEL_NODEGROUP] = locking.ALL_SET
    else:
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
        list(self.cfg.GetNodeGroupsFromNodes(
          self.needed_locks[locking.LEVEL_NODE]))
    self.share_locks[locking.LEVEL_NODEGROUP] = 1

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE_RES and \
      self.opportunistic_locks[locking.LEVEL_NODE]:
      # Even when using opportunistic locking, we require the same set of
      # NODE_RES locks as we got NODE locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        self.owned_locks(locking.LEVEL_NODE)

  def _RunAllocator(self):
    """Run the allocator based on input opcode.

    """
    if self.op.opportunistic_locking:
      # Only consider nodes for which a lock is held
      node_name_whitelist = self.cfg.GetNodeNames(
        self.owned_locks(locking.LEVEL_NODE))
    else:
      node_name_whitelist = None

    req = _CreateInstanceAllocRequest(self.op, self.disks,
                                      self.nics, self.be_full,
                                      node_name_whitelist)
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      # When opportunistic locks are used only a temporary failure is generated
      if self.op.opportunistic_locking:
        ecode = errors.ECODE_TEMP_NORES
      else:
        ecode = errors.ECODE_NORES

      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" %
                                 (self.op.iallocator, ial.info),
                                 ecode)

    (self.op.pnode_uuid, self.op.pnode) = \
      ExpandNodeUuidAndName(self.cfg, None, ial.result[0])
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                 self.op.instance_name, self.op.iallocator,
                 utils.CommaJoin(ial.result))

    assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator"

    if req.RequiredNodes() == 2:
      (self.op.snode_uuid, self.op.snode) = \
        ExpandNodeUuidAndName(self.cfg, None, ial.result[1])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "ADD_MODE": self.op.mode,
      }
    if self.op.mode == constants.INSTANCE_IMPORT:
      env["SRC_NODE"] = self.op.src_node
      env["SRC_PATH"] = self.op.src_path
      env["SRC_IMAGES"] = self.src_images

    env.update(BuildInstanceHookEnv(
      name=self.op.instance_name,
      primary_node_name=self.op.pnode,
      secondary_node_names=self.cfg.GetNodeNames(self.secondaries),
      status=self.op.start,
      os_type=self.op.os_type,
      minmem=self.be_full[constants.BE_MINMEM],
      maxmem=self.be_full[constants.BE_MAXMEM],
      vcpus=self.be_full[constants.BE_VCPUS],
      nics=NICListToTuple(self, self.nics),
      disk_template=self.op.disk_template,
      disks=[(d[constants.IDISK_NAME], d.get("uuid", ""),
              d[constants.IDISK_SIZE], d[constants.IDISK_MODE])
             for d in self.disks],
      bep=self.be_full,
      hvp=self.hv_full,
      hypervisor_name=self.op.hypervisor,
      tags=self.op.tags,
      ))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode(), self.op.pnode_uuid] + self.secondaries
    return nl, nl

  def _ReadExportInfo(self):
    """Reads the export information from disk.

    It will override the opcode source node and path with the actual
    information, if these two were not specified before.

    @return: the export information

    """
    assert self.op.mode == constants.INSTANCE_IMPORT

    if self.op.src_node_uuid is None:
      locked_nodes = self.owned_locks(locking.LEVEL_NODE)
      exp_list = self.rpc.call_export_list(locked_nodes)
      found = False
      for node_uuid in exp_list:
        if exp_list[node_uuid].fail_msg:
          continue
        if self.op.src_path in exp_list[node_uuid].payload:
          found = True
          self.op.src_node = self.cfg.GetNodeInfo(node_uuid).name
          self.op.src_node_uuid = node_uuid
          self.op.src_path = utils.PathJoin(pathutils.EXPORT_DIR,
                                            self.op.src_path)
          break
      if not found:
        raise errors.OpPrereqError("No export found for relative path %s" %
                                   self.op.src_path, errors.ECODE_INVAL)

    CheckNodeOnline(self, self.op.src_node_uuid)
    result = self.rpc.call_export_info(self.op.src_node_uuid, self.op.src_path)
    result.Raise("No export or invalid export found in dir %s" %
                 self.op.src_path)

    export_info = objects.SerializableConfigParser.Loads(str(result.payload))
    if not export_info.has_section(constants.INISECT_EXP):
      raise errors.ProgrammerError("Corrupted export config",
                                   errors.ECODE_ENVIRON)

    ei_version = export_info.get(constants.INISECT_EXP, "version")
    if int(ei_version) != constants.EXPORT_VERSION:
      raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                 (ei_version, constants.EXPORT_VERSION),
                                 errors.ECODE_ENVIRON)
    return export_info

  def _ReadExportParams(self, einfo):
    """Use export parameters as defaults.

    In case the opcode doesn't specify (as in override) some instance
    parameters, try to use them from the export information, if it
    declares them.

    """
    self.op.os_type = einfo.get(constants.INISECT_EXP, "os")

    if not self.op.disks:
      disks = []
      # TODO: import the disk iv_name too
      for idx in range(constants.MAX_DISKS):
        if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
          disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
          disks.append({constants.IDISK_SIZE: disk_sz})
      self.op.disks = disks
      if not disks and self.op.disk_template != constants.DT_DISKLESS:
        raise errors.OpPrereqError("No disk info specified and the export"
                                   " is missing the disk information",
                                   errors.ECODE_INVAL)

    if not self.op.nics:
      nics = []
      for idx in range(constants.MAX_NICS):
        if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
          ndict = {}
          for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
            nic_param_name = "nic%d_%s" % (idx, name)
            if einfo.has_option(constants.INISECT_INS, nic_param_name):
              v = einfo.get(constants.INISECT_INS, nic_param_name)
              ndict[name] = v
          nics.append(ndict)
        else:
          break
      self.op.nics = nics

    if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
      self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()

    if (self.op.hypervisor is None and
        einfo.has_option(constants.INISECT_INS, "hypervisor")):
      self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")

    if einfo.has_section(constants.INISECT_HYP):
      # use the export parameters but do not override the ones
      # specified by the user
      for name, value in einfo.items(constants.INISECT_HYP):
        if name not in self.op.hvparams:
          self.op.hvparams[name] = value

    if einfo.has_section(constants.INISECT_BEP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_BEP):
        if name not in self.op.beparams:
          self.op.beparams[name] = value
        # Compatibility for the old "memory" be param
        if name == constants.BE_MEMORY:
          if constants.BE_MAXMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MAXMEM] = value
          if constants.BE_MINMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MINMEM] = value
    else:
      # try to read the parameters old style, from the main section
      for name in constants.BES_PARAMETERS:
        if (name not in self.op.beparams and
            einfo.has_option(constants.INISECT_INS, name)):
          self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)

    if einfo.has_section(constants.INISECT_OSP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_OSP):
        if name not in self.op.osparams:
          self.op.osparams[name] = value

    if einfo.has_section(constants.INISECT_OSP_PRIVATE):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_OSP_PRIVATE):
        if name not in self.op.osparams_private:
          self.op.osparams_private[name] = serializer.Private(value, descr=name)
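  # Illustrative sketch (assumption, not from the upstream source): the export
  # info is an INI-style document along the lines of
  #   [export]
  #   version = 0
  #   os = debootstrap+default
  #   [instance]
  #   disk0_size = 10240
  #   nic0_mac = aa:00:00:fa:3a:3f
  # and its values only fill in opcode parameters the user left unspecified;
  # they never override explicitly given ones.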

  def _RevertToDefaults(self, cluster):
    """Revert the instance parameters to the default values.

    """
    # hvparams
    hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
    for name in self.op.hvparams.keys():
      if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
        del self.op.hvparams[name]
    # beparams
    be_defs = cluster.SimpleFillBE({})
    for name in self.op.beparams.keys():
      if name in be_defs and be_defs[name] == self.op.beparams[name]:
        del self.op.beparams[name]
    # nic params
    nic_defs = cluster.SimpleFillNIC({})
    for nic in self.op.nics:
      for name in constants.NICS_PARAMETERS:
        if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
          del nic[name]
    # osparams
    os_defs = cluster.SimpleFillOS(self.op.os_type, {})
    for name in self.op.osparams.keys():
      if name in os_defs and os_defs[name] == self.op.osparams[name]:
        del self.op.osparams[name]

    os_defs_ = cluster.SimpleFillOS(self.op.os_type, {},
                                    os_params_private={})
    for name in self.op.osparams_private.keys():
      if name in os_defs_ and os_defs_[name] == self.op.osparams_private[name]:
        del self.op.osparams_private[name]
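  # Illustrative note (assumption, not from the upstream source): with
  # identify_defaults, parameters that merely repeat the current cluster
  # defaults are dropped from the opcode, so the new instance keeps tracking
  # the cluster defaults instead of storing redundant per-instance overrides.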

  def _CalculateFileStorageDir(self):
    """Calculate final instance file storage dir.

    """
    # file storage dir calculation/check
    self.instance_file_storage_dir = None
    if self.op.disk_template in constants.DTS_FILEBASED:
      # build the full file storage dir path
      joinargs = []

      cfg_storage = None
      if self.op.disk_template == constants.DT_FILE:
        cfg_storage = self.cfg.GetFileStorageDir()
      elif self.op.disk_template == constants.DT_SHARED_FILE:
        cfg_storage = self.cfg.GetSharedFileStorageDir()
      elif self.op.disk_template == constants.DT_GLUSTER:
        cfg_storage = self.cfg.GetGlusterStorageDir()

      if not cfg_storage:
        raise errors.OpPrereqError(
          "Cluster file storage dir for {tpl} storage type not defined".format(
            tpl=repr(self.op.disk_template)
          ),
          errors.ECODE_STATE
        )

      joinargs.append(cfg_storage)

      if self.op.file_storage_dir is not None:
        joinargs.append(self.op.file_storage_dir)

      if self.op.disk_template != constants.DT_GLUSTER:
        joinargs.append(self.op.instance_name)

      if len(joinargs) > 1:
        # pylint: disable=W0142
        self.instance_file_storage_dir = utils.PathJoin(*joinargs)
      else:
        self.instance_file_storage_dir = joinargs[0]
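  # Illustrative example (assumption, not from the upstream source): for a
  # DT_FILE instance "inst1.example.com", a cluster file storage dir of
  # "/srv/ganeti/file-storage" and no file_storage_dir override, the result is
  # "/srv/ganeti/file-storage/inst1.example.com"; for DT_GLUSTER the instance
  # name component is omitted.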

  def CheckPrereq(self): # pylint: disable=R0914
    """Check prerequisites.

    """
    # Check that the optimistically acquired groups are correct wrt the
    # acquired nodes
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
    cur_groups = list(self.cfg.GetNodeGroupsFromNodes(owned_nodes))
    if not owned_groups.issuperset(cur_groups):
      raise errors.OpPrereqError("New instance %s's node groups changed since"
                                 " locks were acquired, current groups are"
                                 " '%s', owning groups '%s'; retry the"
                                 " operation" %
                                 (self.op.instance_name,
                                  utils.CommaJoin(cur_groups),
                                  utils.CommaJoin(owned_groups)),
                                 errors.ECODE_STATE)

    self._CalculateFileStorageDir()

    if self.op.mode == constants.INSTANCE_IMPORT:
      export_info = self._ReadExportInfo()
      self._ReadExportParams(export_info)
      self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
    else:
      self._old_instance_name = None

    if (not self.cfg.GetVGName() and
        self.op.disk_template not in constants.DTS_NOT_LVM):
      raise errors.OpPrereqError("Cluster does not support lvm-based"
                                 " instances", errors.ECODE_STATE)

    if (self.op.hypervisor is None or
        self.op.hypervisor == constants.VALUE_AUTO):
      self.op.hypervisor = self.cfg.GetHypervisorType()

    cluster = self.cfg.GetClusterInfo()
    enabled_hvs = cluster.enabled_hypervisors
    if self.op.hypervisor not in enabled_hvs:
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
                                 " cluster (%s)" %
                                 (self.op.hypervisor, ",".join(enabled_hvs)),
                                 errors.ECODE_STATE)

    # Check tag validity
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)

    # check hypervisor parameter syntax (locally)
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
    filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
                                      self.op.hvparams)
    hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
    hv_type.CheckParameterSyntax(filled_hvp)
    self.hv_full = filled_hvp
    # check that we don't specify global parameters on an instance
    CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
                         "instance", "cluster")

    # fill and remember the beparams dict
    self.be_full = _ComputeFullBeParams(self.op, cluster)

    # build os parameters
    if self.op.osparams_private is None:
      self.op.osparams_private = serializer.PrivateDict()
    if self.op.osparams_secret is None:
      self.op.osparams_secret = serializer.PrivateDict()

    self.os_full = cluster.SimpleFillOS(
      self.op.os_type,
      self.op.osparams,
      os_params_private=self.op.osparams_private,
      os_params_secret=self.op.osparams_secret
    )

    # now that hvp/bep are in final format, let's reset to defaults,
    # if told to do so
    if self.op.identify_defaults:
      self._RevertToDefaults(cluster)

    # NIC buildup
    self.nics = _ComputeNics(self.op, cluster, self.check_ip, self.cfg,
                             self.proc.GetECId())

    # disk checks/pre-build
    default_vg = self.cfg.GetVGName()
    self.disks = ComputeDisks(self.op, default_vg)

    if self.op.mode == constants.INSTANCE_IMPORT:
      disk_images = []
      for idx in range(len(self.disks)):
        option = "disk%d_dump" % idx
        if export_info.has_option(constants.INISECT_INS, option):
          # FIXME: are the old os-es, disk sizes, etc. useful?
          export_name = export_info.get(constants.INISECT_INS, option)
          image = utils.PathJoin(self.op.src_path, export_name)
          disk_images.append(image)
        else:
          disk_images.append(False)

      self.src_images = disk_images

      if self.op.instance_name == self._old_instance_name:
        for idx, nic in enumerate(self.nics):
          if nic.mac == constants.VALUE_AUTO:
            nic_mac_ini = "nic%d_mac" % idx
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)

    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT

    # ip ping checks (we use the same ip that was resolved in ExpandNames)
    if self.op.ip_check:
      if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (self.check_ip, self.op.instance_name),
                                   errors.ECODE_NOTUNIQUE)

    #### mac address generation
    # By generating the mac address here, both the allocator and the hooks get
    # the real final mac address rather than the 'auto' or 'generate' value.
    # There is a race condition between the generation and the instance object
    # creation, which means that we know the mac is valid now, but we're not
    # sure it will be when we actually add the instance. If things go bad
    # adding the instance will abort because of a duplicate mac, and the
    # creation job will fail.
    for nic in self.nics:
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId())

    #### allocator run

    if self.op.iallocator is not None:
      self._RunAllocator()

    # Release all unneeded node locks
    keep_locks = filter(None, [self.op.pnode_uuid, self.op.snode_uuid,
                               self.op.src_node_uuid])
    ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)
    # Release all unneeded group locks
    ReleaseLocks(self, locking.LEVEL_NODEGROUP,
                 keep=self.cfg.GetNodeGroupsFromNodes(keep_locks))

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES)), \
      "Node locks differ from node resource locks"

    #### node related checks

    # check primary node
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode_uuid)
    assert self.pnode is not None, \
      "Cannot retrieve locked node %s" % self.op.pnode_uuid
    if pnode.offline:
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if pnode.drained:
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if not pnode.vm_capable:
      raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
                                 " '%s'" % pnode.name, errors.ECODE_STATE)

    self.secondaries = []

    # Fill in any IPs from IP pools. This must happen here, because we need to
    # know the nic's primary node, as specified by the iallocator
    for idx, nic in enumerate(self.nics):
      net_uuid = nic.network
      if net_uuid is not None:
        nobj = self.cfg.GetNetwork(net_uuid)
        netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.uuid)
        if netparams is None:
          raise errors.OpPrereqError("No netparams found for network"
                                     " %s. Probably not connected to"
                                     " node %s's nodegroup" %
                                     (nobj.name, self.pnode.name),
                                     errors.ECODE_INVAL)
        self.LogInfo("NIC/%d inherits netparams %s" %
                     (idx, netparams.values()))
        nic.nicparams = dict(netparams)
        if nic.ip is not None:
          if nic.ip.lower() == constants.NIC_IP_POOL:
            try:
              nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
                                         " from the address pool" % idx,
                                         errors.ECODE_STATE)
            self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name)
          else:
            try:
              self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId(),
                                 check=self.op.conflicts_check)
            except errors.ReservationError:
              raise errors.OpPrereqError("IP address %s already in use"
                                         " or does not belong to network %s" %
                                         (nic.ip, nobj.name),
                                         errors.ECODE_NOTUNIQUE)

      # net is None, ip None or given
      elif self.op.conflicts_check:
        _CheckForConflictingIp(self, nic.ip, self.pnode.uuid)

    # mirror node verification
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.snode_uuid == pnode.uuid:
        raise errors.OpPrereqError("The secondary node cannot be the"
                                   " primary node", errors.ECODE_INVAL)
      CheckNodeOnline(self, self.op.snode_uuid)
      CheckNodeNotDrained(self, self.op.snode_uuid)
      CheckNodeVmCapable(self, self.op.snode_uuid)
      self.secondaries.append(self.op.snode_uuid)

      snode = self.cfg.GetNodeInfo(self.op.snode_uuid)
      if pnode.group != snode.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    nodes = [pnode]
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      nodes.append(snode)
    has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
    excl_stor = compat.any(map(has_es, nodes))
    if excl_stor and not self.op.disk_template in constants.DTS_EXCL_STORAGE:
      raise errors.OpPrereqError("Disk template %s not supported with"
                                 " exclusive storage" % self.op.disk_template,
                                 errors.ECODE_STATE)
    for disk in self.disks:
      CheckSpindlesExclusiveStorage(disk, excl_stor, True)

    node_uuids = [pnode.uuid] + self.secondaries
1165

    
1166
    if not self.adopt_disks:
1167
      if self.op.disk_template == constants.DT_RBD:
1168
        # _CheckRADOSFreeSpace() is just a placeholder.
1169
        # Any function that checks prerequisites can be placed here.
1170
        # Check if there is enough space on the RADOS cluster.
1171
        CheckRADOSFreeSpace()
1172
      elif self.op.disk_template == constants.DT_EXT:
1173
        # FIXME: Function that checks prereqs if needed
1174
        pass
1175
      elif self.op.disk_template in constants.DTS_LVM:
1176
        # Check lv size requirements, if not adopting
1177
        req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
1178
        CheckNodesFreeDiskPerVG(self, node_uuids, req_sizes)
1179
      else:
1180
        # FIXME: add checks for other, non-adopting, non-lvm disk templates
1181
        pass

    elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
      all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
                                disk[constants.IDISK_ADOPT])
                     for disk in self.disks])
      if len(all_lvs) != len(self.disks):
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
                                   errors.ECODE_INVAL)
      for lv_name in all_lvs:
        try:
          # FIXME: lv_name here is "vg/lv" need to ensure that other calls
          # to ReserveLV use the same syntax
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("LV named %s used by another instance" %
                                     lv_name, errors.ECODE_NOTUNIQUE)

      vg_names = self.rpc.call_vg_list([pnode.uuid])[pnode.uuid]
      vg_names.Raise("Cannot get VG information from node %s" % pnode.name)

      node_lvs = self.rpc.call_lv_list([pnode.uuid],
                                       vg_names.payload.keys())[pnode.uuid]
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
      node_lvs = node_lvs.payload

      delta = all_lvs.difference(node_lvs.keys())
      if delta:
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
      if online_lvs:
        raise errors.OpPrereqError("Online logical volumes found, cannot"
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
                                   errors.ECODE_STATE)
      # update the size of disk based on what is found
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
                                        dsk[constants.IDISK_ADOPT])][0]))

    elif self.op.disk_template == constants.DT_BLOCK:
      # Normalize and de-duplicate device paths
      all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
                       for disk in self.disks])
      if len(all_disks) != len(self.disks):
        raise errors.OpPrereqError("Duplicate disk names given for adoption",
                                   errors.ECODE_INVAL)
      baddisks = [d for d in all_disks
                  if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
      if baddisks:
        raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
                                   " cannot be adopted" %
                                   (utils.CommaJoin(baddisks),
                                    constants.ADOPTABLE_BLOCKDEV_ROOT),
                                   errors.ECODE_INVAL)

      node_disks = self.rpc.call_bdev_sizes([pnode.uuid],
                                            list(all_disks))[pnode.uuid]
      node_disks.Raise("Cannot get block device information from node %s" %
                       pnode.name)
      node_disks = node_disks.payload
      delta = all_disks.difference(node_disks.keys())
      if delta:
        raise errors.OpPrereqError("Missing block device(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_disks[dsk[constants.IDISK_ADOPT]]))

    # Check disk access param to be compatible with specified hypervisor
    node_info = self.cfg.GetNodeInfo(self.op.pnode_uuid)
    node_group = self.cfg.GetNodeGroup(node_info.group)
    disk_params = self.cfg.GetGroupDiskParams(node_group)
    access_type = disk_params[self.op.disk_template].get(
      constants.RBD_ACCESS, constants.DISK_KERNELSPACE
    )

    if not IsValidDiskAccessModeCombination(self.op.hypervisor,
                                            self.op.disk_template,
                                            access_type):
      raise errors.OpPrereqError("Selected hypervisor (%s) cannot be"
                                 " used with %s disk access param" %
                                 (self.op.hypervisor, access_type),
                                 errors.ECODE_STATE)

    # Verify instance specs
    spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
    ispec = {
      constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
      constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
      constants.ISPEC_DISK_COUNT: len(self.disks),
      constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE]
                                  for disk in self.disks],
      constants.ISPEC_NIC_COUNT: len(self.nics),
      constants.ISPEC_SPINDLE_USE: spindle_use,
      }

    group_info = self.cfg.GetNodeGroup(pnode.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
    res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
                                               self.op.disk_template)
    if not self.op.ignore_ipolicy and res:
      msg = ("Instance allocation to group %s (%s) violates policy: %s" %
             (pnode.group, group_info.name, utils.CommaJoin(res)))
      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

    CheckHVParams(self, node_uuids, self.op.hypervisor, self.op.hvparams)

    CheckNodeHasOS(self, pnode.uuid, self.op.os_type, self.op.force_variant)
    # check OS parameters (remotely)
    CheckOSParams(self, True, node_uuids, self.op.os_type, self.os_full)

    CheckNicsBridgesExist(self, self.nics, self.pnode.uuid)

    #TODO: _CheckExtParams (remotely)
    # Check parameters for extstorage

    # memory check on primary node
    #TODO(dynmem): use MINMEM for checking
    if self.op.start:
      hvfull = objects.FillDict(cluster.hvparams.get(self.op.hypervisor, {}),
                                self.op.hvparams)
      CheckNodeFreeMemory(self, self.pnode.uuid,
                          "creating instance %s" % self.op.instance_name,
                          self.be_full[constants.BE_MAXMEM],
                          self.op.hypervisor, hvfull)

    self.dry_run_result = list(node_uuids)

  def Exec(self, feedback_fn):
    """Create and add the instance to the cluster.

    """
    assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
                self.owned_locks(locking.LEVEL_NODE)), \
      "Node locks differ from node resource locks"
    assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)

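    # Hypervisors in constants.HTS_REQ_PORT need a cluster-wide TCP port
    # reserved per instance (typically for the remote console), so one is
    # taken from the cluster's port pool before the Instance object is built.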
    ht_kind = self.op.hypervisor
    if ht_kind in constants.HTS_REQ_PORT:
      network_port = self.cfg.AllocatePort()
    else:
      network_port = None

    instance_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())

    # This is ugly but we got a chicken-egg problem here
    # We can only take the group disk parameters, as the instance
    # has no disks yet (we are generating them right here).
    nodegroup = self.cfg.GetNodeGroup(self.pnode.group)
    disks = GenerateDiskTemplate(self,
                                 self.op.disk_template,
                                 instance_uuid, self.pnode.uuid,
                                 self.secondaries,
                                 self.disks,
                                 self.instance_file_storage_dir,
                                 self.op.file_driver,
                                 0,
                                 feedback_fn,
                                 self.cfg.GetGroupDiskParams(nodegroup))

    iobj = objects.Instance(name=self.op.instance_name,
                            uuid=instance_uuid,
                            os=self.op.os_type,
                            primary_node=self.pnode.uuid,
                            nics=self.nics, disks=disks,
                            disk_template=self.op.disk_template,
                            disks_active=False,
                            admin_state=constants.ADMINST_DOWN,
                            network_port=network_port,
                            beparams=self.op.beparams,
                            hvparams=self.op.hvparams,
                            hypervisor=self.op.hypervisor,
                            osparams=self.op.osparams,
                            osparams_private=self.op.osparams_private,
                            )

    if self.op.tags:
      for tag in self.op.tags:
        iobj.AddTag(tag)

    if self.adopt_disks:
      if self.op.disk_template == constants.DT_PLAIN:
        # rename LVs to the newly-generated names; we need to construct
        # 'fake' LV disks with the old data, plus the new unique_id
        tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
        rename_to = []
        for t_dsk, a_dsk in zip(tmp_disks, self.disks):
          rename_to.append(t_dsk.logical_id)
          t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
        result = self.rpc.call_blockdev_rename(self.pnode.uuid,
                                               zip(tmp_disks, rename_to))
        result.Raise("Failed to rename adopted LVs")
    else:
      feedback_fn("* creating instance disks...")
      try:
        CreateDisks(self, iobj)
      except errors.OpExecError:
        self.LogWarning("Device creation failed")
        self.cfg.ReleaseDRBDMinors(self.op.instance_name)
        raise

    feedback_fn("adding instance %s to cluster config" % self.op.instance_name)

    self.cfg.AddInstance(iobj, self.proc.GetECId())

    # Declare that we don't want to remove the instance lock anymore, as we've
    # added the instance to the config
    del self.remove_locks[locking.LEVEL_INSTANCE]

    if self.op.mode == constants.INSTANCE_IMPORT:
      # Release unused nodes
      ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node_uuid])
    else:
      # Release all nodes
      ReleaseLocks(self, locking.LEVEL_NODE)

    disk_abort = False
    if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
      feedback_fn("* wiping instance disks...")
      try:
        WipeDisks(self, iobj)
      except errors.OpExecError, err:
        logging.exception("Wiping disks failed")
        self.LogWarning("Wiping instance disks failed (%s)", err)
        disk_abort = True

    if disk_abort:
      # Something is already wrong with the disks, don't do anything else
      pass
    elif self.op.wait_for_sync:
      disk_abort = not WaitForSync(self, iobj)
    elif iobj.disk_template in constants.DTS_INT_MIRROR:
      # make sure the disks are not degraded (still sync-ing is ok)
      feedback_fn("* checking mirrors status")
      disk_abort = not WaitForSync(self, iobj, oneshot=True)
    else:
      disk_abort = False

    if disk_abort:
      RemoveDisks(self, iobj)
      self.cfg.RemoveInstance(iobj.uuid)
      # Make sure the instance lock gets removed
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance")

    # instance disks are now active
    iobj.disks_active = True

    # Release all node resource locks
    ReleaseLocks(self, locking.LEVEL_NODE_RES)

    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
      if self.op.mode == constants.INSTANCE_CREATE:
        if not self.op.no_install:
          pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
                        not self.op.wait_for_sync)
          if pause_sync:
            feedback_fn("* pausing disk sync to install instance OS")
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
                                                              (iobj.disks,
                                                               iobj), True)
            for idx, success in enumerate(result.payload):
              if not success:
                logging.warn("pause-sync of instance %s for disk %d failed",
                             self.op.instance_name, idx)

          feedback_fn("* running the instance OS create scripts...")
          # FIXME: pass debug option from opcode to backend
          os_add_result = \
            self.rpc.call_instance_os_add(self.pnode.uuid,
                                          (iobj, self.op.osparams_secret),
                                          False,
                                          self.op.debug_level)
          if pause_sync:
            feedback_fn("* resuming disk sync")
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
                                                              (iobj.disks,
                                                               iobj), False)
            for idx, success in enumerate(result.payload):
              if not success:
                logging.warn("resume-sync of instance %s for disk %d failed",
                             self.op.instance_name, idx)

          os_add_result.Raise("Could not add os for instance %s"
                              " on node %s" % (self.op.instance_name,
                                               self.pnode.name))

      else:
        if self.op.mode == constants.INSTANCE_IMPORT:
          feedback_fn("* running the instance OS import scripts...")

          transfers = []

          for idx, image in enumerate(self.src_images):
            if not image:
              continue

            # FIXME: pass debug option from opcode to backend
            dt = masterd.instance.DiskTransfer("disk/%s" % idx,
                                               constants.IEIO_FILE, (image, ),
                                               constants.IEIO_SCRIPT,
                                               ((iobj.disks[idx], iobj), idx),
                                               None)
            transfers.append(dt)

          import_result = \
            masterd.instance.TransferInstanceData(self, feedback_fn,
                                                  self.op.src_node_uuid,
                                                  self.pnode.uuid,
                                                  self.pnode.secondary_ip,
                                                  self.op.compress,
                                                  iobj, transfers)
          if not compat.all(import_result):
            self.LogWarning("Some disks for instance %s on node %s were not"
                            " imported successfully" % (self.op.instance_name,
                                                        self.pnode.name))

          rename_from = self._old_instance_name

        elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
          feedback_fn("* preparing remote import...")
          # The source cluster will stop the instance before attempting to make
          # a connection. In some cases stopping an instance can take a long
          # time, hence the shutdown timeout is added to the connection
          # timeout.
          connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
                             self.op.source_shutdown_timeout)
          timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)

          assert iobj.primary_node == self.pnode.uuid
          disk_results = \
            masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
                                          self.source_x509_ca,
                                          self._cds, self.op.compress, timeouts)
          if not compat.all(disk_results):
            # TODO: Should the instance still be started, even if some disks
            # failed to import (valid for local imports, too)?
            self.LogWarning("Some disks for instance %s on node %s were not"
                            " imported successfully" % (self.op.instance_name,
                                                        self.pnode.name))

          rename_from = self.source_instance_name

        else:
          # also checked in the prereq part
          raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
                                       % self.op.mode)

        # Run rename script on newly imported instance
        assert iobj.name == self.op.instance_name
        feedback_fn("Running rename script for %s" % self.op.instance_name)
        result = self.rpc.call_instance_run_rename(self.pnode.uuid, iobj,
                                                   rename_from,
                                                   self.op.debug_level)
        result.Warn("Failed to run rename script for %s on node %s" %
                    (self.op.instance_name, self.pnode.name), self.LogWarning)

    assert not self.owned_locks(locking.LEVEL_NODE_RES)

    if self.op.start:
      iobj.admin_state = constants.ADMINST_UP
      self.cfg.Update(iobj, feedback_fn)
      logging.info("Starting instance %s on node %s", self.op.instance_name,
                   self.pnode.name)
      feedback_fn("* starting instance...")
      result = self.rpc.call_instance_start(self.pnode.uuid, (iobj, None, None),
                                            False, self.op.reason)
      result.Raise("Could not start instance")

    return list(iobj.all_nodes)


class LUInstanceRename(LogicalUnit):
  """Rename an instance.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE

  def CheckArguments(self):
    """Check arguments.

    """
    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("IP address check requires a name check",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = BuildInstanceHookEnvByObject(self, self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    (self.op.instance_uuid, self.op.instance_name) = \
      ExpandInstanceUuidAndName(self.cfg, self.op.instance_uuid,
                                self.op.instance_name)
    instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    assert instance is not None

    # It should actually not happen that an instance is running with a disabled
    # disk template, but in case it does, the renaming of file-based instances
    # will fail horribly. Thus, we test it before.
    if (instance.disk_template in constants.DTS_FILEBASED and
        self.op.new_name != instance.name):
      CheckDiskTemplateEnabled(self.cfg.GetClusterInfo(),
                               instance.disk_template)

    CheckNodeOnline(self, instance.primary_node)
    CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
                       msg="cannot rename")
    self.instance = instance

    new_name = self.op.new_name
    if self.op.name_check:
      hostname = _CheckHostnameSane(self, new_name)
      new_name = self.op.new_name = hostname.name
      if (self.op.ip_check and
          netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (hostname.ip, new_name),
                                   errors.ECODE_NOTUNIQUE)

    instance_names = [inst.name for
                      inst in self.cfg.GetAllInstancesInfo().values()]
    if new_name in instance_names and new_name != instance.name:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 new_name, errors.ECODE_EXISTS)

  def Exec(self, feedback_fn):
    """Rename the instance.

    """
    old_name = self.instance.name

    rename_file_storage = False
    if (self.instance.disk_template in (constants.DT_FILE,
                                        constants.DT_SHARED_FILE) and
        self.op.new_name != self.instance.name):
      old_file_storage_dir = os.path.dirname(
                               self.instance.disks[0].logical_id[1])
      rename_file_storage = True

    self.cfg.RenameInstance(self.instance.uuid, self.op.new_name)
    # Change the instance lock. This is definitely safe while we hold the BGL.
    # Otherwise the new lock would have to be added in acquired mode.
    assert self.REQ_BGL
    assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER)
    self.glm.remove(locking.LEVEL_INSTANCE, old_name)
    self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)

    # re-read the instance from the configuration after rename
    renamed_inst = self.cfg.GetInstanceInfo(self.instance.uuid)

    if rename_file_storage:
      new_file_storage_dir = os.path.dirname(
                               renamed_inst.disks[0].logical_id[1])
      result = self.rpc.call_file_storage_dir_rename(renamed_inst.primary_node,
                                                     old_file_storage_dir,
                                                     new_file_storage_dir)
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
                   " (but the instance has been renamed in Ganeti)" %
                   (self.cfg.GetNodeName(renamed_inst.primary_node),
                    old_file_storage_dir, new_file_storage_dir))

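    # Both the blockdev info update and the OS rename script below need the
    # instance's disks to be reachable, so they are activated here and shut
    # down again in the finally clause.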
    StartInstanceDisks(self, renamed_inst, None)
    # update info on disks
    info = GetInstanceInfoText(renamed_inst)
    for (idx, disk) in enumerate(renamed_inst.disks):
      for node_uuid in renamed_inst.all_nodes:
        result = self.rpc.call_blockdev_setinfo(node_uuid,
                                                (disk, renamed_inst), info)
        result.Warn("Error setting info on node %s for disk %s" %
                    (self.cfg.GetNodeName(node_uuid), idx), self.LogWarning)
    try:
      result = self.rpc.call_instance_run_rename(renamed_inst.primary_node,
                                                 renamed_inst, old_name,
                                                 self.op.debug_level)
      result.Warn("Could not run OS rename script for instance %s on node %s"
                  " (but the instance has been renamed in Ganeti)" %
                  (renamed_inst.name,
                   self.cfg.GetNodeName(renamed_inst.primary_node)),
                  self.LogWarning)
    finally:
      ShutdownInstanceDisks(self, renamed_inst)

    return renamed_inst.name


class LUInstanceRemove(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()
    elif level == locking.LEVEL_NODE_RES:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = BuildInstanceHookEnvByObject(self, self.instance)
    env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()]
    nl_post = list(self.instance.all_nodes) + nl
    return (nl, nl_post)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Remove the instance.

    """
    logging.info("Shutting down instance %s on node %s", self.instance.name,
                 self.cfg.GetNodeName(self.instance.primary_node))

    result = self.rpc.call_instance_shutdown(self.instance.primary_node,
                                             self.instance,
                                             self.op.shutdown_timeout,
                                             self.op.reason)
    if self.op.ignore_failures:
      result.Warn("Warning: can't shutdown instance", feedback_fn)
    else:
      result.Raise("Could not shutdown instance %s on node %s" %
                   (self.instance.name,
                    self.cfg.GetNodeName(self.instance.primary_node)))

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES))
    assert not (set(self.instance.all_nodes) -
                self.owned_locks(locking.LEVEL_NODE)), \
      "Not owning correct locks"

    RemoveInstance(self, feedback_fn, self.instance, self.op.ignore_failures)


class LUInstanceMove(LogicalUnit):
  """Move an instance by data-copying.

  """
  HPATH = "instance-move"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    (self.op.target_node_uuid, self.op.target_node) = \
      ExpandNodeUuidAndName(self.cfg, self.op.target_node_uuid,
                            self.op.target_node)
    self.needed_locks[locking.LEVEL_NODE] = [self.op.target_node_uuid]
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes(primary_only=True)
    elif level == locking.LEVEL_NODE_RES:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and target nodes of the instance.

    """
    env = {
      "TARGET_NODE": self.op.target_node,
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
      }
    env.update(BuildInstanceHookEnvByObject(self, self.instance))
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [
      self.cfg.GetMasterNode(),
      self.instance.primary_node,
      self.op.target_node_uuid,
      ]
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    if self.instance.disk_template not in constants.DTS_COPYABLE:
      raise errors.OpPrereqError("Disk template %s not suitable for copying" %
                                 self.instance.disk_template,
                                 errors.ECODE_STATE)

    target_node = self.cfg.GetNodeInfo(self.op.target_node_uuid)
    assert target_node is not None, \
      "Cannot retrieve locked node %s" % self.op.target_node

    self.target_node_uuid = target_node.uuid
    if target_node.uuid == self.instance.primary_node:
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
                                 (self.instance.name, target_node.name),
                                 errors.ECODE_STATE)

    cluster = self.cfg.GetClusterInfo()
    bep = cluster.FillBE(self.instance)

    for idx, dsk in enumerate(self.instance.disks):
      if dsk.dev_type not in (constants.DT_PLAIN, constants.DT_FILE,
                              constants.DT_SHARED_FILE, constants.DT_GLUSTER):
        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
                                   " cannot copy" % idx, errors.ECODE_STATE)

    CheckNodeOnline(self, target_node.uuid)
    CheckNodeNotDrained(self, target_node.uuid)
    CheckNodeVmCapable(self, target_node.uuid)
    group_info = self.cfg.GetNodeGroup(target_node.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
    CheckTargetNodeIPolicy(self, ipolicy, self.instance, target_node, self.cfg,
                           ignore=self.op.ignore_ipolicy)

    if self.instance.admin_state == constants.ADMINST_UP:
      # check memory requirements on the target node
      CheckNodeFreeMemory(
          self, target_node.uuid, "failing over instance %s" %
          self.instance.name, bep[constants.BE_MAXMEM],
          self.instance.hypervisor,
          cluster.hvparams[self.instance.hypervisor])
    else:
      self.LogInfo("Not checking memory on the secondary node as"
                   " instance will not be started")

    # check bridge existence
    CheckInstanceBridgesExist(self, self.instance, node_uuid=target_node.uuid)

  def Exec(self, feedback_fn):
    """Move an instance.

    The move is done by shutting it down on its present node, copying
    the data over (slow) and starting it on the new node.

    """
    source_node = self.cfg.GetNodeInfo(self.instance.primary_node)
    target_node = self.cfg.GetNodeInfo(self.target_node_uuid)

    self.LogInfo("Shutting down instance %s on source node %s",
                 self.instance.name, source_node.name)

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES))

    result = self.rpc.call_instance_shutdown(source_node.uuid, self.instance,
                                             self.op.shutdown_timeout,
                                             self.op.reason)
    if self.op.ignore_consistency:
      result.Warn("Could not shutdown instance %s on node %s. Proceeding"
                  " anyway. Please make sure node %s is down. Error details" %
                  (self.instance.name, source_node.name, source_node.name),
                  self.LogWarning)
    else:
      result.Raise("Could not shutdown instance %s on node %s" %
                   (self.instance.name, source_node.name))

    # create the target disks
    try:
      CreateDisks(self, self.instance, target_node_uuid=target_node.uuid)
    except errors.OpExecError:
      self.LogWarning("Device creation failed")
      self.cfg.ReleaseDRBDMinors(self.instance.uuid)
      raise

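    # The data is copied disk by disk: each DiskTransfer below streams the
    # raw contents of one source block device into the matching, freshly
    # created device on the target node.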
    errs = []
    transfers = []
    # activate, get path, create transfer jobs
    for idx, disk in enumerate(self.instance.disks):
      # FIXME: pass debug option from opcode to backend
      dt = masterd.instance.DiskTransfer("disk/%s" % idx,
                                         constants.IEIO_RAW_DISK,
                                         (disk, self.instance),
                                         constants.IEIO_RAW_DISK,
                                         (disk, self.instance),
                                         None)
      transfers.append(dt)

    import_result = \
      masterd.instance.TransferInstanceData(self, feedback_fn,
                                            source_node.uuid,
                                            target_node.uuid,
                                            target_node.secondary_ip,
                                            self.op.compress,
                                            self.instance, transfers)
    if not compat.all(import_result):
      errs.append("Failed to transfer instance data")

    if errs:
      self.LogWarning("Some disks failed to copy, aborting")
      try:
        RemoveDisks(self, self.instance, target_node_uuid=target_node.uuid)
      finally:
        self.cfg.ReleaseDRBDMinors(self.instance.uuid)
        raise errors.OpExecError("Errors during disk copy: %s" %
                                 (",".join(errs),))

    self.instance.primary_node = target_node.uuid
    self.cfg.Update(self.instance, feedback_fn)

    self.LogInfo("Removing the disks on the original node")
    RemoveDisks(self, self.instance, target_node_uuid=source_node.uuid)

    # Only start the instance if it's marked as up
    if self.instance.admin_state == constants.ADMINST_UP:
      self.LogInfo("Starting instance %s on node %s",
                   self.instance.name, target_node.name)

      disks_ok, _ = AssembleInstanceDisks(self, self.instance,
                                          ignore_secondaries=True)
      if not disks_ok:
        ShutdownInstanceDisks(self, self.instance)
        raise errors.OpExecError("Can't activate the instance's disks")

      result = self.rpc.call_instance_start(target_node.uuid,
                                            (self.instance, None, None), False,
                                            self.op.reason)
      msg = result.fail_msg
      if msg:
        ShutdownInstanceDisks(self, self.instance)
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
                                 (self.instance.name, target_node.name, msg))


class LUInstanceMultiAlloc(NoHooksLU):
  """Allocates multiple instances at the same time.

  """
  REQ_BGL = False

  def CheckArguments(self):
    """Check arguments.

    """
    nodes = []
    for inst in self.op.instances:
      if inst.iallocator is not None:
        raise errors.OpPrereqError("iallocator is not allowed to be set on"
                                   " instance objects", errors.ECODE_INVAL)
      nodes.append(bool(inst.pnode))
      if inst.disk_template in constants.DTS_INT_MIRROR:
        nodes.append(bool(inst.snode))

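    # Node placement must be all-or-nothing: either every instance in the
    # request names its node(s) explicitly, or none does and an iallocator
    # places the whole batch.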
    has_nodes = compat.any(nodes)
    if compat.all(nodes) ^ has_nodes:
      raise errors.OpPrereqError("There are instance objects providing"
                                 " pnode/snode while others do not",
                                 errors.ECODE_INVAL)

    if not has_nodes and self.op.iallocator is None:
      default_iallocator = self.cfg.GetDefaultIAllocator()
      if default_iallocator:
        self.op.iallocator = default_iallocator
      else:
        raise errors.OpPrereqError("No iallocator or nodes on the instances"
                                   " given and no cluster-wide default"
                                   " iallocator found; please specify either"
                                   " an iallocator or nodes on the instances"
                                   " or set a cluster-wide default iallocator",
                                   errors.ECODE_INVAL)

    _CheckOpportunisticLocking(self.op)

    dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
    if dups:
      raise errors.OpPrereqError("There are duplicate instance names: %s" %
                                 utils.CommaJoin(dups), errors.ECODE_INVAL)

  def ExpandNames(self):
    """Calculate the locks.

    """
    self.share_locks = ShareAll()
    self.needed_locks = {
      # iallocator will select nodes and even if no iallocator is used,
      # collisions with LUInstanceCreate should be avoided
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
      }

    if self.op.iallocator:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
      self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET

      if self.op.opportunistic_locking:
        self.opportunistic_locks[locking.LEVEL_NODE] = True
    else:
      nodeslist = []
      for inst in self.op.instances:
        (inst.pnode_uuid, inst.pnode) = \
          ExpandNodeUuidAndName(self.cfg, inst.pnode_uuid, inst.pnode)
        nodeslist.append(inst.pnode_uuid)
        if inst.snode is not None:
          (inst.snode_uuid, inst.snode) = \
            ExpandNodeUuidAndName(self.cfg, inst.snode_uuid, inst.snode)
          nodeslist.append(inst.snode_uuid)

      self.needed_locks[locking.LEVEL_NODE] = nodeslist
      # Lock resources of instance's primary and secondary nodes (copy to
      # prevent accidental modification)
      self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE_RES and \
      self.opportunistic_locks[locking.LEVEL_NODE]:
      # Even when using opportunistic locking, we require the same set of
      # NODE_RES locks as we got NODE locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        self.owned_locks(locking.LEVEL_NODE)

  def CheckPrereq(self):
    """Check prerequisite.

    """
    if self.op.iallocator:
      cluster = self.cfg.GetClusterInfo()
      default_vg = self.cfg.GetVGName()
      ec_id = self.proc.GetECId()

      if self.op.opportunistic_locking:
        # Only consider nodes for which a lock is held
        node_whitelist = self.cfg.GetNodeNames(
                           list(self.owned_locks(locking.LEVEL_NODE)))
      else:
        node_whitelist = None

      insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
                                           _ComputeNics(op, cluster, None,
                                                        self.cfg, ec_id),
                                           _ComputeFullBeParams(op, cluster),
                                           node_whitelist)
               for op in self.op.instances]

      req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
      ial = iallocator.IAllocator(self.cfg, self.rpc, req)

      ial.Run(self.op.iallocator)

      if not ial.success:
        raise errors.OpPrereqError("Can't compute nodes using"
                                   " iallocator '%s': %s" %
                                   (self.op.iallocator, ial.info),
                                   errors.ECODE_NORES)

      self.ia_result = ial.result

    if self.op.dry_run:
      self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
        constants.JOB_IDS_KEY: [],
        })

  def _ConstructPartialResult(self):
    """Constructs the partial result.

    """
    if self.op.iallocator:
      (allocatable, failed_insts) = self.ia_result
      allocatable_insts = map(compat.fst, allocatable)
    else:
      allocatable_insts = [op.instance_name for op in self.op.instances]
      failed_insts = []

    return {
      constants.ALLOCATABLE_KEY: allocatable_insts,
      constants.FAILED_KEY: failed_insts,
      }

  def Exec(self, feedback_fn):
    """Executes the opcode.

    """
    jobs = []
    if self.op.iallocator:
      op2inst = dict((op.instance_name, op) for op in self.op.instances)
      (allocatable, failed) = self.ia_result

      for (name, node_names) in allocatable:
        op = op2inst.pop(name)

        (op.pnode_uuid, op.pnode) = \
          ExpandNodeUuidAndName(self.cfg, None, node_names[0])
        if len(node_names) > 1:
          (op.snode_uuid, op.snode) = \
            ExpandNodeUuidAndName(self.cfg, None, node_names[1])

        jobs.append([op])

      missing = set(op2inst.keys()) - set(failed)
      assert not missing, \
        "Iallocator did return incomplete result: %s" % \
        utils.CommaJoin(missing)
    else:
      jobs.extend([op] for op in self.op.instances)

    return ResultWithJobs(jobs, **self._ConstructPartialResult())


class _InstNicModPrivate:
  """Data structure for network interface modifications.

  Used by L{LUInstanceSetParams}.

  """
  def __init__(self):
    self.params = None
    self.filled = None


def _PrepareContainerMods(mods, private_fn):
  """Prepares a list of container modifications by adding a private data field.

  @type mods: list of tuples; (operation, index, parameters)
  @param mods: List of modifications
  @type private_fn: callable or None
  @param private_fn: Callable for constructing a private data field for a
    modification
  @rtype: list

  """
  if private_fn is None:
    fn = lambda: None
  else:
    fn = private_fn

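  # Each (op, index, params) modification is widened to a 4-tuple carrying a
  # per-modification private object as produced by private_fn (or None).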
  return [(op, idx, params, fn()) for (op, idx, params) in mods]


def _CheckNodesPhysicalCPUs(lu, node_uuids, requested, hypervisor_specs):
  """Checks if nodes have enough physical CPUs

  This function checks if all given nodes have the needed number of
  physical CPUs. In case any node has less CPUs or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type node_uuids: C{list}
  @param node_uuids: the list of node UUIDs to check
  @type requested: C{int}
  @param requested: the minimum acceptable number of physical CPUs
  @type hypervisor_specs: list of pairs (string, dict of strings)
  @param hypervisor_specs: list of hypervisor specifications in
      pairs (hypervisor_name, hvparams)
  @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
      or we cannot check the node

  """
  nodeinfo = lu.rpc.call_node_info(node_uuids, None, hypervisor_specs)
  for node_uuid in node_uuids:
    info = nodeinfo[node_uuid]
    node_name = lu.cfg.GetNodeName(node_uuid)
    info.Raise("Cannot get current information from node %s" % node_name,
               prereq=True, ecode=errors.ECODE_ENVIRON)
    (_, _, (hv_info, )) = info.payload
    num_cpus = hv_info.get("cpu_total", None)
    if not isinstance(num_cpus, int):
      raise errors.OpPrereqError("Can't compute the number of physical CPUs"
                                 " on node %s, result was '%s'" %
                                 (node_name, num_cpus), errors.ECODE_ENVIRON)
    if requested > num_cpus:
      raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
                                 "required" % (node_name, num_cpus, requested),
                                 errors.ECODE_NORES)


def GetItemFromContainer(identifier, kind, container):
  """Return the item referred to by the identifier.

  @type identifier: string
  @param identifier: Item index or name or UUID
  @type kind: string
  @param kind: One-word item description
  @type container: list
  @param container: Container to get the item from

  """
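  # Rough usage sketch (assuming items expose .name and .uuid attributes):
  #   GetItemFromContainer("2", "disk", disks)    -> (2, disks[2])
  #   GetItemFromContainer("-1", "disk", disks)   -> last disk in the list
  #   GetItemFromContainer("data", "disk", disks) -> the disk whose name or
  #                                                  UUID equals "data"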
  # Index
  try:
    idx = int(identifier)
    if idx == -1:
      # Append
      absidx = len(container) - 1
    elif idx < 0:
      raise IndexError("Not accepting negative indices other than -1")
    elif idx > len(container):
      raise IndexError("Got %s index %s, but there are only %s" %
                       (kind, idx, len(container)))
    else:
      absidx = idx
    return (absidx, container[idx])
  except ValueError:
    pass

  for idx, item in enumerate(container):
    if item.uuid == identifier or item.name == identifier:
      return (idx, item)

  raise errors.OpPrereqError("Cannot find %s with identifier %s" %
                             (kind, identifier), errors.ECODE_NOENT)


def _ApplyContainerMods(kind, container, chgdesc, mods,
                        create_fn, modify_fn, remove_fn,
                        post_add_fn=None):
  """Applies descriptions in C{mods} to C{container}.

  @type kind: string
  @param kind: One-word item description
  @type container: list
  @param container: Container to modify
  @type chgdesc: None or list
  @param chgdesc: List of applied changes
  @type mods: list
  @param mods: Modifications as returned by L{_PrepareContainerMods}
  @type create_fn: callable
  @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
    receives absolute item index, parameters and private data object as added
    by L{_PrepareContainerMods}, returns tuple containing new item and changes
    as list
  @type modify_fn: callable
  @param modify_fn: Callback for modifying an existing item
    (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
    and private data object as added by L{_PrepareContainerMods}, returns
    changes as list
  @type remove_fn: callable
  @param remove_fn: Callback on removing item; receives absolute item index,
    item and private data object as added by L{_PrepareContainerMods}
  @type post_add_fn: callable
  @param post_add_fn: Callable for post-processing a newly created item after
    it has been put into the container. It receives the index of the new item
    and the new item as parameters.

  """
  for (op, identifier, params, private) in mods:
    changes = None

    if op == constants.DDM_ADD:
      # Calculate where item will be added
      # When adding an item, identifier can only be an index
      try:
        idx = int(identifier)
      except ValueError:
        raise errors.OpPrereqError("Only positive integer or -1 is accepted as"
                                   " identifier for %s" % constants.DDM_ADD,
                                   errors.ECODE_INVAL)
      if idx == -1:
        addidx = len(container)
      else:
        if idx < 0:
          raise IndexError("Not accepting negative indices other than -1")
        elif idx > len(container):
          raise IndexError("Got %s index %s, but there are only %s" %
                           (kind, idx, len(container)))
        addidx = idx

      if create_fn is None:
        item = params
      else:
        (item, changes) = create_fn(addidx, params, private)

      if idx == -1:
        container.append(item)
      else:
        assert idx >= 0
        assert idx <= len(container)
        # list.insert does so before the specified index
        container.insert(idx, item)

      if post_add_fn is not None:
        post_add_fn(addidx, item)

    else:
      # Retrieve existing item
      (absidx, item) = GetItemFromContainer(identifier, kind, container)

      if op == constants.DDM_REMOVE:
        assert not params

        changes = [("%s/%s" % (kind, absidx), "remove")]

        if remove_fn is not None:
          msg = remove_fn(absidx, item, private)
          if msg:
            changes.append(("%s/%s" % (kind, absidx), msg))

        assert container[absidx] == item
        del container[absidx]
      elif op == constants.DDM_MODIFY:
        if modify_fn is not None:
          changes = modify_fn(absidx, item, params, private)
      else:
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)

    assert _TApplyContModsCbChanges(changes)

    if not (chgdesc is None or changes is None):
      chgdesc.extend(changes)


def _UpdateIvNames(base_index, disks):
  """Updates the C{iv_name} attribute of disks.

  @type disks: list of L{objects.Disk}

  """
  for (idx, disk) in enumerate(disks):
    disk.iv_name = "disk/%s" % (base_index + idx, )


class LUInstanceSetParams(LogicalUnit):
  """Modifies an instance's parameters.

  """
  HPATH = "instance-modify"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  @staticmethod
  def _UpgradeDiskNicMods(kind, mods, verify_fn):
    assert ht.TList(mods)
    assert not mods or len(mods[0]) in (2, 3)

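    # Legacy opcodes used (op, params) 2-tuples; upgrade them to the current
    # (op, identifier, params) form: a lone add/remove gets the special
    # index -1, anything else becomes a modify of the named item.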
    if mods and len(mods[0]) == 2:
2368
      result = []
2369

    
2370
      addremove = 0
2371
      for op, params in mods:
2372
        if op in (constants.DDM_ADD, constants.DDM_REMOVE):
2373
          result.append((op, -1, params))
2374
          addremove += 1
2375

    
2376
          if addremove > 1:
2377
            raise errors.OpPrereqError("Only one %s add or remove operation is"
2378
                                       " supported at a time" % kind,
2379
                                       errors.ECODE_INVAL)
2380
        else:
2381
          result.append((constants.DDM_MODIFY, op, params))
2382

    
2383
      assert verify_fn(result)
2384
    else:
2385
      result = mods
2386

    
2387
    return result
2388

    
2389
  @staticmethod
2390
  def _CheckMods(kind, mods, key_types, item_fn):
2391
    """Ensures requested disk/NIC modifications are valid.
2392

2393
    """
2394
    for (op, _, params) in mods:
2395
      assert ht.TDict(params)
2396

    
2397
      # If 'key_types' is an empty dict, we assume we have an
2398
      # 'ext' template and thus do not ForceDictType
2399
      if key_types:
2400
        utils.ForceDictType(params, key_types)
2401

    
2402
      if op == constants.DDM_REMOVE:
2403
        if params:
2404
          raise errors.OpPrereqError("No settings should be passed when"
2405
                                     " removing a %s" % kind,
2406
                                     errors.ECODE_INVAL)
2407
      elif op in (constants.DDM_ADD, constants.DDM_MODIFY):
2408
        item_fn(op, params)
2409
      else:
2410
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2411

    
2412
  def _VerifyDiskModification(self, op, params, excl_stor):
2413
    """Verifies a disk modification.
2414

2415
    """
2416
    if op == constants.DDM_ADD:
2417
      mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
2418
      if mode not in constants.DISK_ACCESS_SET:
2419
        raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
2420
                                   errors.ECODE_INVAL)
2421

    
2422
      size = params.get(constants.IDISK_SIZE, None)
2423
      if size is None:
2424
        raise errors.OpPrereqError("Required disk parameter '%s' missing" %
2425
                                   constants.IDISK_SIZE, errors.ECODE_INVAL)
2426
      size = int(size)
2427

    
2428
      params[constants.IDISK_SIZE] = size
2429
      name = params.get(constants.IDISK_NAME, None)
2430
      if name is not None and name.lower() == constants.VALUE_NONE:
2431
        params[constants.IDISK_NAME] = None
2432

    
2433
      CheckSpindlesExclusiveStorage(params, excl_stor, True)
2434

    
2435
    elif op == constants.DDM_MODIFY:
2436
      if constants.IDISK_SIZE in params:
2437
        raise errors.OpPrereqError("Disk size change not possible, use"
2438
                                   " grow-disk", errors.ECODE_INVAL)
2439

    
2440
      # Disk modification supports changing only the disk name and mode.
2441
      # Changing arbitrary parameters is allowed only for ext disk template",
2442
      if self.instance.disk_template != constants.DT_EXT:
2443
        utils.ForceDictType(params, constants.MODIFIABLE_IDISK_PARAMS_TYPES)
2444

    
2445
      name = params.get(constants.IDISK_NAME, None)
2446
      if name is not None and name.lower() == constants.VALUE_NONE:
2447
        params[constants.IDISK_NAME] = None
2448

    
2449
  @staticmethod
2450
  def _VerifyNicModification(op, params):
2451
    """Verifies a network interface modification.
2452

2453
    """
2454
    if op in (constants.DDM_ADD, constants.DDM_MODIFY):
2455
      ip = params.get(constants.INIC_IP, None)
2456
      name = params.get(constants.INIC_NAME, None)
2457
      req_net = params.get(constants.INIC_NETWORK, None)
2458
      link = params.get(constants.NIC_LINK, None)
2459
      mode = params.get(constants.NIC_MODE, None)
2460
      if name is not None and name.lower() == constants.VALUE_NONE:
2461
        params[constants.INIC_NAME] = None
2462
      if req_net is not None:
2463
        if req_net.lower() == constants.VALUE_NONE:
2464
          params[constants.INIC_NETWORK] = None
2465
          req_net = None
2466
        elif link is not None or mode is not None:
2467
          raise errors.OpPrereqError("If network is given"
2468
                                     " mode or link should not",
2469
                                     errors.ECODE_INVAL)
2470

    
2471
      if op == constants.DDM_ADD:
2472
        macaddr = params.get(constants.INIC_MAC, None)
2473
        if macaddr is None:
2474
          params[constants.INIC_MAC] = constants.VALUE_AUTO
2475

    
2476
      if ip is not None:
2477
        if ip.lower() == constants.VALUE_NONE:
2478
          params[constants.INIC_IP] = None
2479
        else:
2480
          if ip.lower() == constants.NIC_IP_POOL:
2481
            if op == constants.DDM_ADD and req_net is None:
2482
              raise errors.OpPrereqError("If ip=pool, parameter network"
2483
                                         " cannot be none",
2484
                                         errors.ECODE_INVAL)
2485
          else:
2486
            if not netutils.IPAddress.IsValid(ip):
2487
              raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
2488
                                         errors.ECODE_INVAL)
2489

    
2490
      if constants.INIC_MAC in params:
2491
        macaddr = params[constants.INIC_MAC]
2492
        if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2493
          macaddr = utils.NormalizeAndValidateMac(macaddr)
2494

    
2495
        if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
2496
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
2497
                                     " modifying an existing NIC",
2498
                                     errors.ECODE_INVAL)
2499

    
2500
  def CheckArguments(self):
2501
    if not (self.op.nics or self.op.disks or self.op.disk_template or
2502
            self.op.hvparams or self.op.beparams or self.op.os_name or
2503
            self.op.osparams or self.op.offline is not None or
2504
            self.op.runtime_mem or self.op.pnode or self.op.osparams_private):
2505
      raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
2506

    
2507
    if self.op.hvparams:
2508
      CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
2509
                           "hypervisor", "instance", "cluster")
2510

    
2511
    self.op.disks = self._UpgradeDiskNicMods(
2512
      "disk", self.op.disks, ht.TSetParamsMods(ht.TIDiskParams))
2513
    self.op.nics = self._UpgradeDiskNicMods(
2514
      "NIC", self.op.nics, ht.TSetParamsMods(ht.TINicParams))
2515

    
2516
    if self.op.disks and self.op.disk_template is not None:
2517
      raise errors.OpPrereqError("Disk template conversion and other disk"
2518
                                 " changes not supported at the same time",
2519
                                 errors.ECODE_INVAL)
2520

    
2521
    if (self.op.disk_template and
2522
        self.op.disk_template in constants.DTS_INT_MIRROR and
2523
        self.op.remote_node is None):
2524
      raise errors.OpPrereqError("Changing the disk template to a mirrored"
2525
                                 " one requires specifying a secondary node",
2526
                                 errors.ECODE_INVAL)
2527

    
2528
    # Check NIC modifications
2529
    self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
2530
                    self._VerifyNicModification)
2531

    
2532
    if self.op.pnode:
2533
      (self.op.pnode_uuid, self.op.pnode) = \
2534
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
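  # Summary of the normalization above (example values are hypothetical):
  # after CheckArguments, self.op.disks and self.op.nics hold
  # (op, identifier, params) triples such as
  #   [(constants.DDM_ADD, -1, {constants.IDISK_SIZE: 1024})]
  # with sizes in MB, and mutually exclusive requests (per-disk changes
  # together with a template conversion) have already been rejected.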
2535

    
2536
  def ExpandNames(self):
2537
    self._ExpandAndLockInstance()
2538
    self.needed_locks[locking.LEVEL_NODEGROUP] = []
2539
    # Can't even acquire node locks in shared mode as upcoming changes in
2540
    # Ganeti 2.6 will start to modify the node object on disk conversion
2541
    self.needed_locks[locking.LEVEL_NODE] = []
2542
    self.needed_locks[locking.LEVEL_NODE_RES] = []
2543
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2544
    # Lock the node group to look up the ipolicy
2545
    self.share_locks[locking.LEVEL_NODEGROUP] = 1
2546

    
2547
  def DeclareLocks(self, level):
2548
    if level == locking.LEVEL_NODEGROUP:
2549
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
2550
      # Acquire locks for the instance's nodegroups optimistically. Needs
2551
      # to be verified in CheckPrereq
2552
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
2553
        self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
2554
    elif level == locking.LEVEL_NODE:
2555
      self._LockInstancesNodes()
2556
      if self.op.disk_template and self.op.remote_node:
2557
        (self.op.remote_node_uuid, self.op.remote_node) = \
2558
          ExpandNodeUuidAndName(self.cfg, self.op.remote_node_uuid,
2559
                                self.op.remote_node)
2560
        self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node_uuid)
2561
    elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
2562
      # Copy node locks
2563
      self.needed_locks[locking.LEVEL_NODE_RES] = \
2564
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
2565

    
2566
  def BuildHooksEnv(self):
2567
    """Build hooks env.
2568

2569
    This runs on the master, primary and secondaries.
2570

2571
    """
2572
    args = {}
2573
    if constants.BE_MINMEM in self.be_new:
2574
      args["minmem"] = self.be_new[constants.BE_MINMEM]
2575
    if constants.BE_MAXMEM in self.be_new:
2576
      args["maxmem"] = self.be_new[constants.BE_MAXMEM]
2577
    if constants.BE_VCPUS in self.be_new:
2578
      args["vcpus"] = self.be_new[constants.BE_VCPUS]
2579
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
2580
    # information at all.
2581

    
2582
    if self._new_nics is not None:
2583
      nics = []
2584

    
2585
      for nic in self._new_nics:
2586
        n = copy.deepcopy(nic)
2587
        nicparams = self.cluster.SimpleFillNIC(n.nicparams)
2588
        n.nicparams = nicparams
2589
        nics.append(NICToTuple(self, n))
2590

    
2591
      args["nics"] = nics
2592

    
2593
    env = BuildInstanceHookEnvByObject(self, self.instance, override=args)
2594
    if self.op.disk_template:
2595
      env["NEW_DISK_TEMPLATE"] = self.op.disk_template
2596
    if self.op.runtime_mem:
2597
      env["RUNTIME_MEMORY"] = self.op.runtime_mem
2598

    
2599
    return env
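  # Illustrative example (values are hypothetical): with only memory and CPU
  # changes, the override dict built above would look roughly like
  #   {"minmem": 512, "maxmem": 1024, "vcpus": 2}
  # and a "nics" entry is added only when NIC modifications were prepared.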
2600

    
2601
  def BuildHooksNodes(self):
2602
    """Build hooks nodes.
2603

2604
    """
2605
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
2606
    return (nl, nl)
2607

    
2608
  def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
2609
                              old_params, cluster, pnode_uuid):
2610

    
2611
    update_params_dict = dict([(key, params[key])
2612
                               for key in constants.NICS_PARAMETERS
2613
                               if key in params])
2614

    
2615
    req_link = update_params_dict.get(constants.NIC_LINK, None)
2616
    req_mode = update_params_dict.get(constants.NIC_MODE, None)
2617

    
2618
    new_net_uuid = None
2619
    new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid)
2620
    if new_net_uuid_or_name:
2621
      new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name)
2622
      new_net_obj = self.cfg.GetNetwork(new_net_uuid)
2623

    
2624
    if old_net_uuid:
2625
      old_net_obj = self.cfg.GetNetwork(old_net_uuid)
2626

    
2627
    if new_net_uuid:
2628
      netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode_uuid)
2629
      if not netparams:
2630
        raise errors.OpPrereqError("No netparams found for the network"
2631
                                   " %s, probably not connected" %
2632
                                   new_net_obj.name, errors.ECODE_INVAL)
2633
      new_params = dict(netparams)
2634
    else:
2635
      new_params = GetUpdatedParams(old_params, update_params_dict)
2636

    
2637
    utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)
2638

    
2639
    new_filled_params = cluster.SimpleFillNIC(new_params)
2640
    objects.NIC.CheckParameterSyntax(new_filled_params)
2641

    
2642
    new_mode = new_filled_params[constants.NIC_MODE]
2643
    if new_mode == constants.NIC_MODE_BRIDGED:
2644
      bridge = new_filled_params[constants.NIC_LINK]
2645
      msg = self.rpc.call_bridges_exist(pnode_uuid, [bridge]).fail_msg
2646
      if msg:
2647
        msg = "Error checking bridges on node '%s': %s" % \
2648
                (self.cfg.GetNodeName(pnode_uuid), msg)
2649
        if self.op.force:
2650
          self.warn.append(msg)
2651
        else:
2652
          raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
2653

    
2654
    elif new_mode == constants.NIC_MODE_ROUTED:
2655
      ip = params.get(constants.INIC_IP, old_ip)
2656
      if ip is None:
2657
        raise errors.OpPrereqError("Cannot set the NIC IP address to None"
2658
                                   " on a routed NIC", errors.ECODE_INVAL)
2659

    
2660
    elif new_mode == constants.NIC_MODE_OVS:
2661
      # TODO: check OVS link
2662
      self.LogInfo("OVS links are currently not checked for correctness")
2663

    
2664
    if constants.INIC_MAC in params:
2665
      mac = params[constants.INIC_MAC]
2666
      if mac is None:
2667
        raise errors.OpPrereqError("Cannot unset the NIC MAC address",
2668
                                   errors.ECODE_INVAL)
2669
      elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2670
        # otherwise generate the MAC address
2671
        params[constants.INIC_MAC] = \
2672
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
2673
      else:
2674
        # or validate/reserve the current one
2675
        try:
2676
          self.cfg.ReserveMAC(mac, self.proc.GetECId())
2677
        except errors.ReservationError:
2678
          raise errors.OpPrereqError("MAC address '%s' already in use"
2679
                                     " in cluster" % mac,
2680
                                     errors.ECODE_NOTUNIQUE)
2681
    elif new_net_uuid != old_net_uuid:
2682

    
2683
      def get_net_prefix(net_uuid):
2684
        mac_prefix = None
2685
        if net_uuid:
2686
          nobj = self.cfg.GetNetwork(net_uuid)
2687
          mac_prefix = nobj.mac_prefix
2688

    
2689
        return mac_prefix
2690

    
2691
      new_prefix = get_net_prefix(new_net_uuid)
2692
      old_prefix = get_net_prefix(old_net_uuid)
2693
      if old_prefix != new_prefix:
2694
        params[constants.INIC_MAC] = \
2695
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
2696

    
2697
    # if there is a change in (ip, network) tuple
2698
    new_ip = params.get(constants.INIC_IP, old_ip)
2699
    if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
2700
      if new_ip:
2701
        # if IP is pool then require a network and generate one IP
2702
        if new_ip.lower() == constants.NIC_IP_POOL:
2703
          if new_net_uuid:
2704
            try:
2705
              new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
2706
            except errors.ReservationError:
2707
              raise errors.OpPrereqError("Unable to get a free IP"
2708
                                         " from the address pool",
2709
                                         errors.ECODE_STATE)
2710
            self.LogInfo("Chose IP %s from network %s",
2711
                         new_ip,
2712
                         new_net_obj.name)
2713
            params[constants.INIC_IP] = new_ip
2714
          else:
2715
            raise errors.OpPrereqError("ip=pool, but no network found",
2716
                                       errors.ECODE_INVAL)
2717
        # Reserve the new IP in the new network, if any
2718
        elif new_net_uuid:
2719
          try:
2720
            self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId(),
2721
                               check=self.op.conflicts_check)
2722
            self.LogInfo("Reserving IP %s in network %s",
2723
                         new_ip, new_net_obj.name)
2724
          except errors.ReservationError:
2725
            raise errors.OpPrereqError("IP %s not available in network %s" %
2726
                                       (new_ip, new_net_obj.name),
2727
                                       errors.ECODE_NOTUNIQUE)
2728
        # new network is None so check if new IP is a conflicting IP
2729
        elif self.op.conflicts_check:
2730
          _CheckForConflictingIp(self, new_ip, pnode_uuid)
2731

    
2732
      # release old IP if old network is not None
2733
      if old_ip and old_net_uuid:
2734
        try:
2735
          self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId())
2736
        except errors.AddressPoolError:
2737
          logging.warning("Release IP %s not contained in network %s",
2738
                          " network %s", old_ip, old_net_obj.name)
2739

    
2740
    # there are no changes in (ip, network) tuple and old network is not None
2741
    elif (old_net_uuid is not None and
2742
          (req_link is not None or req_mode is not None)):
2743
      raise errors.OpPrereqError("Not allowed to change link or mode of"
2744
                                 " a NIC that is connected to a network",
2745
                                 errors.ECODE_INVAL)
2746

    
2747
    private.params = new_params
2748
    private.filled = new_filled_params
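  # Descriptive note: at this point `private` (the _InstNicModPrivate object
  # created via _PrepareContainerMods) carries the partially specified
  # nicparams in .params and the cluster-filled version in .filled; the
  # Exec-phase callbacks (_CreateNewNic, _ApplyNicMods) later read .filled
  # when building the final NIC object.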
2749

    
2750
  def _PreCheckDiskTemplate(self, pnode_info):
2751
    """CheckPrereq checks related to a new disk template."""
2752
    # Arguments are passed to avoid configuration lookups
2753
    pnode_uuid = self.instance.primary_node
2754
    if self.instance.disk_template == self.op.disk_template:
2755
      raise errors.OpPrereqError("Instance already has disk template %s" %
2756
                                 self.instance.disk_template,
2757
                                 errors.ECODE_INVAL)
2758

    
2759
    if not self.cluster.IsDiskTemplateEnabled(self.op.disk_template):
2760
      raise errors.OpPrereqError("Disk template '%s' is not enabled for this"
2761
                                 " cluster." % self.op.disk_template)
2762

    
2763
    if (self.instance.disk_template,
2764
        self.op.disk_template) not in self._DISK_CONVERSIONS:
2765
      raise errors.OpPrereqError("Unsupported disk template conversion from"
2766
                                 " %s to %s" % (self.instance.disk_template,
2767
                                                self.op.disk_template),
2768
                                 errors.ECODE_INVAL)
2769
    CheckInstanceState(self, self.instance, INSTANCE_DOWN,
2770
                       msg="cannot change disk template")
2771
    if self.op.disk_template in constants.DTS_INT_MIRROR:
2772
      if self.op.remote_node_uuid == pnode_uuid:
2773
        raise errors.OpPrereqError("Given new secondary node %s is the same"
2774
                                   " as the primary node of the instance" %
2775
                                   self.op.remote_node, errors.ECODE_STATE)
2776
      CheckNodeOnline(self, self.op.remote_node_uuid)
2777
      CheckNodeNotDrained(self, self.op.remote_node_uuid)
2778
      # FIXME: here we assume that the old instance type is DT_PLAIN
2779
      assert self.instance.disk_template == constants.DT_PLAIN
2780
      disks = [{constants.IDISK_SIZE: d.size,
2781
                constants.IDISK_VG: d.logical_id[0]}
2782
               for d in self.instance.disks]
2783
      required = ComputeDiskSizePerVG(self.op.disk_template, disks)
2784
      CheckNodesFreeDiskPerVG(self, [self.op.remote_node_uuid], required)
2785

    
2786
      snode_info = self.cfg.GetNodeInfo(self.op.remote_node_uuid)
2787
      snode_group = self.cfg.GetNodeGroup(snode_info.group)
2788
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
2789
                                                              snode_group)
2790
      CheckTargetNodeIPolicy(self, ipolicy, self.instance, snode_info, self.cfg,
2791
                             ignore=self.op.ignore_ipolicy)
2792
      if pnode_info.group != snode_info.group:
2793
        self.LogWarning("The primary and secondary nodes are in two"
2794
                        " different node groups; the disk parameters"
2795
                        " from the first disk's node group will be"
2796
                        " used")
2797

    
2798
    if self.op.disk_template not in constants.DTS_EXCL_STORAGE:
2799
      # Make sure none of the nodes require exclusive storage
2800
      nodes = [pnode_info]
2801
      if self.op.disk_template in constants.DTS_INT_MIRROR:
2802
        assert snode_info
2803
        nodes.append(snode_info)
2804
      has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
2805
      if compat.any(map(has_es, nodes)):
2806
        errmsg = ("Cannot convert disk template from %s to %s when exclusive"
2807
                  " storage is enabled" % (self.instance.disk_template,
2808
                                           self.op.disk_template))
2809
        raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)
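  # Illustrative example (VG name and size are hypothetical): converting a
  # plain instance with one 10 GiB disk on VG "xenvg" to drbd makes the code
  # above require roughly
  #   ComputeDiskSizePerVG(constants.DT_DRBD8,
  #                        [{constants.IDISK_SIZE: 10240,
  #                          constants.IDISK_VG: "xenvg"}])
  # of free space per VG on the new secondary node (disk size plus DRBD
  # metadata overhead).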
2810

    
2811
  def _PreCheckDisks(self, ispec):
2812
    """CheckPrereq checks related to disk changes.
2813

2814
    @type ispec: dict
2815
    @param ispec: instance specs to be updated with the new disks
2816

2817
    """
2818
    self.diskparams = self.cfg.GetInstanceDiskParams(self.instance)
2819

    
2820
    excl_stor = compat.any(
2821
      rpc.GetExclusiveStorageForNodes(self.cfg,
2822
                                      self.instance.all_nodes).values()
2823
      )
2824

    
2825
    # Check disk modifications. This is done here and not in CheckArguments
2826
    # (as with NICs), because we need to know the instance's disk template
2827
    ver_fn = lambda op, par: self._VerifyDiskModification(op, par, excl_stor)
2828
    if self.instance.disk_template == constants.DT_EXT:
2829
      self._CheckMods("disk", self.op.disks, {}, ver_fn)
2830
    else:
2831
      self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
2832
                      ver_fn)
2833

    
2834
    self.diskmod = _PrepareContainerMods(self.op.disks, None)
2835

    
2836
    # Check the validity of the `provider' parameter
2837
    if self.instance.disk_template == constants.DT_EXT:
2838
      for mod in self.diskmod:
2839
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
2840
        if mod[0] == constants.DDM_ADD:
2841
          if ext_provider is None:
2842
            raise errors.OpPrereqError("Instance template is '%s' and parameter"
2843
                                       " '%s' missing, during disk add" %
2844
                                       (constants.DT_EXT,
2845
                                        constants.IDISK_PROVIDER),
2846
                                       errors.ECODE_NOENT)
2847
        elif mod[0] == constants.DDM_MODIFY:
2848
          if ext_provider:
2849
            raise errors.OpPrereqError("Parameter '%s' is invalid during disk"
2850
                                       " modification" %
2851
                                       constants.IDISK_PROVIDER,
2852
                                       errors.ECODE_INVAL)
2853
    else:
2854
      for mod in self.diskmod:
2855
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
2856
        if ext_provider is not None:
2857
          raise errors.OpPrereqError("Parameter '%s' is only valid for"
2858
                                     " instances of type '%s'" %
2859
                                     (constants.IDISK_PROVIDER,
2860
                                      constants.DT_EXT),
2861
                                     errors.ECODE_INVAL)
2862

    
2863
    if not self.op.wait_for_sync and self.instance.disks_active:
2864
      for mod in self.diskmod:
2865
        if mod[0] == constants.DDM_ADD:
2866
          raise errors.OpPrereqError("Can't add a disk to an instance with"
2867
                                     " activated disks and"
2868
                                     " --no-wait-for-sync given.",
2869
                                     errors.ECODE_INVAL)
2870

    
2871
    if self.op.disks and self.instance.disk_template == constants.DT_DISKLESS:
2872
      raise errors.OpPrereqError("Disk operations not supported for"
2873
                                 " diskless instances", errors.ECODE_INVAL)
2874

    
2875
    def _PrepareDiskMod(_, disk, params, __):
2876
      disk.name = params.get(constants.IDISK_NAME, None)
2877

    
2878
    # Verify disk changes (operating on a copy)
2879
    disks = copy.deepcopy(self.instance.disks)
2880
    _ApplyContainerMods("disk", disks, None, self.diskmod, None,
2881
                        _PrepareDiskMod, None)
2882
    utils.ValidateDeviceNames("disk", disks)
2883
    if len(disks) > constants.MAX_DISKS:
2884
      raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
2885
                                 " more" % constants.MAX_DISKS,
2886
                                 errors.ECODE_STATE)
2887
    disk_sizes = [disk.size for disk in self.instance.disks]
2888
    disk_sizes.extend(params["size"] for (op, idx, params, private) in
2889
                      self.diskmod if op == constants.DDM_ADD)
2890
    ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
2891
    ispec[constants.ISPEC_DISK_SIZE] = disk_sizes
2892

    
2893
    if self.op.offline is not None and self.op.offline:
2894
      CheckInstanceState(self, self.instance, CAN_CHANGE_INSTANCE_OFFLINE,
2895
                         msg="can't change to offline")
2896

    
2897
  def CheckPrereq(self):
2898
    """Check prerequisites.
2899

2900
    This checks the instance's state and validates the requested changes.
2901

2902
    """
2903
    assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
2904
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
2905
    self.cluster = self.cfg.GetClusterInfo()
2906
    cluster_hvparams = self.cluster.hvparams[self.instance.hypervisor]
2907

    
2908
    assert self.instance is not None, \
2909
      "Cannot retrieve locked instance %s" % self.op.instance_name
2910

    
2911
    pnode_uuid = self.instance.primary_node
2912

    
2913
    self.warn = []
2914

    
2915
    if (self.op.pnode_uuid is not None and self.op.pnode_uuid != pnode_uuid and
2916
        not self.op.force):
2917
      # verify that the instance is not up
2918
      instance_info = self.rpc.call_instance_info(
2919
          pnode_uuid, self.instance.name, self.instance.hypervisor,
2920
          cluster_hvparams)
2921
      if instance_info.fail_msg:
2922
        self.warn.append("Can't get instance runtime information: %s" %
2923
                         instance_info.fail_msg)
2924
      elif instance_info.payload:
2925
        raise errors.OpPrereqError("Instance is still running on %s" %
2926
                                   self.cfg.GetNodeName(pnode_uuid),
2927
                                   errors.ECODE_STATE)
2928

    
2929
    assert pnode_uuid in self.owned_locks(locking.LEVEL_NODE)
2930
    node_uuids = list(self.instance.all_nodes)
2931
    pnode_info = self.cfg.GetNodeInfo(pnode_uuid)
2932

    
2933
    #_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
2934
    assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
2935
    group_info = self.cfg.GetNodeGroup(pnode_info.group)
2936

    
2937
    # dictionary with instance information after the modification
2938
    ispec = {}
2939

    
2940
    if self.op.hotplug or self.op.hotplug_if_possible:
2941
      result = self.rpc.call_hotplug_supported(self.instance.primary_node,
2942
                                               self.instance)
2943
      if result.fail_msg:
2944
        if self.op.hotplug:
2945
          result.Raise("Hotplug is not possible: %s" % result.fail_msg,
2946
                       prereq=True)
2947
        else:
2948
          self.LogWarning(result.fail_msg)
2949
          self.op.hotplug = False
2950
          self.LogInfo("Modification will take place without hotplugging.")
2951
      else:
2952
        self.op.hotplug = True
2953

    
2954
    # Prepare NIC modifications
2955
    self.nicmod = _PrepareContainerMods(self.op.nics, _InstNicModPrivate)
2956

    
2957
    # OS change
2958
    if self.op.os_name and not self.op.force:
2959
      CheckNodeHasOS(self, self.instance.primary_node, self.op.os_name,
2960
                     self.op.force_variant)
2961
      instance_os = self.op.os_name
2962
    else:
2963
      instance_os = self.instance.os
2964

    
2965
    assert not (self.op.disk_template and self.op.disks), \
2966
      "Can't modify disk template and apply disk changes at the same time"
2967

    
2968
    if self.op.disk_template:
2969
      self._PreCheckDiskTemplate(pnode_info)
2970

    
2971
    self._PreCheckDisks(ispec)
2972

    
2973
    # hvparams processing
2974
    if self.op.hvparams:
2975
      hv_type = self.instance.hypervisor
2976
      i_hvdict = GetUpdatedParams(self.instance.hvparams, self.op.hvparams)
2977
      utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
2978
      hv_new = self.cluster.SimpleFillHV(hv_type, self.instance.os, i_hvdict)
2979

    
2980
      # local check
2981
      hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
2982
      CheckHVParams(self, node_uuids, self.instance.hypervisor, hv_new)
2983
      self.hv_proposed = self.hv_new = hv_new # the new actual values
2984
      self.hv_inst = i_hvdict # the new dict (without defaults)
2985
    else:
2986
      self.hv_proposed = self.cluster.SimpleFillHV(self.instance.hypervisor,
2987
                                                   self.instance.os,
2988
                                                   self.instance.hvparams)
2989
      self.hv_new = self.hv_inst = {}
2990

    
2991
    # beparams processing
2992
    if self.op.beparams:
2993
      i_bedict = GetUpdatedParams(self.instance.beparams, self.op.beparams,
2994
                                  use_none=True)
2995
      objects.UpgradeBeParams(i_bedict)
2996
      utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
2997
      be_new = self.cluster.SimpleFillBE(i_bedict)
2998
      self.be_proposed = self.be_new = be_new # the new actual values
2999
      self.be_inst = i_bedict # the new dict (without defaults)
3000
    else:
3001
      self.be_new = self.be_inst = {}
3002
      self.be_proposed = self.cluster.SimpleFillBE(self.instance.beparams)
3003
    be_old = self.cluster.FillBE(self.instance)
3004

    
3005
    # CPU param validation -- checking every time a parameter is
3006
    # changed to cover all cases where either CPU mask or vcpus have
3007
    # changed
3008
    if (constants.BE_VCPUS in self.be_proposed and
3009
        constants.HV_CPU_MASK in self.hv_proposed):
3010
      cpu_list = \
3011
        utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
3012
      # Verify mask is consistent with number of vCPUs. Can skip this
3013
      # test if only 1 entry in the CPU mask, which means same mask
3014
      # is applied to all vCPUs.
3015
      if (len(cpu_list) > 1 and
3016
          len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
3017
        raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
3018
                                   " CPU mask [%s]" %
3019
                                   (self.be_proposed[constants.BE_VCPUS],
3020
                                    self.hv_proposed[constants.HV_CPU_MASK]),
3021
                                   errors.ECODE_INVAL)
3022

    
3023
      # Only perform this test if a new CPU mask is given
3024
      if constants.HV_CPU_MASK in self.hv_new:
3025
        # Calculate the largest CPU number requested
3026
        max_requested_cpu = max(map(max, cpu_list))
3027
        # Check that all of the instance's nodes have enough physical CPUs to
3028
        # satisfy the requested CPU mask
3029
        hvspecs = [(self.instance.hypervisor,
3030
                    self.cfg.GetClusterInfo()
3031
                      .hvparams[self.instance.hypervisor])]
3032
        _CheckNodesPhysicalCPUs(self, self.instance.all_nodes,
3033
                                max_requested_cpu + 1,
3034
                                hvspecs)
3035

    
3036
    # osparams processing
3037
    if self.op.osparams or self.op.osparams_private:
3038
      public_parms = self.op.osparams or {}
3039
      private_parms = self.op.osparams_private or {}
3040
      dupe_keys = utils.GetRepeatedKeys(public_parms, private_parms)
3041

    
3042
      if dupe_keys:
3043
        raise errors.OpPrereqError("OS parameters repeated multiple times: %s" %
3044
                                   utils.CommaJoin(dupe_keys))
3045

    
3046
      self.os_inst = GetUpdatedParams(self.instance.osparams,
3047
                                      public_parms)
3048
      self.os_inst_private = GetUpdatedParams(self.instance.osparams_private,
3049
                                              private_parms)
3050

    
3051
      CheckOSParams(self, True, node_uuids, instance_os,
3052
                    objects.FillDict(self.os_inst,
3053
                                     self.os_inst_private))
3054

    
3055
    else:
3056
      self.os_inst = {}
3057
      self.os_inst_private = {}
3058

    
3059
    #TODO(dynmem): do the appropriate check involving MINMEM
3060
    if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
3061
        be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
3062
      mem_check_list = [pnode_uuid]
3063
      if be_new[constants.BE_AUTO_BALANCE]:
3064
        # either we changed auto_balance to yes or it was from before
3065
        mem_check_list.extend(self.instance.secondary_nodes)
3066
      instance_info = self.rpc.call_instance_info(
3067
          pnode_uuid, self.instance.name, self.instance.hypervisor,
3068
          cluster_hvparams)
3069
      hvspecs = [(self.instance.hypervisor,
3070
                  cluster_hvparams)]
3071
      nodeinfo = self.rpc.call_node_info(mem_check_list, None,
3072
                                         hvspecs)
3073
      pninfo = nodeinfo[pnode_uuid]
3074
      msg = pninfo.fail_msg
3075
      if msg:
3076
        # Assume the primary node is unreachable and go ahead
3077
        self.warn.append("Can't get info from primary node %s: %s" %
3078
                         (self.cfg.GetNodeName(pnode_uuid), msg))
3079
      else:
3080
        (_, _, (pnhvinfo, )) = pninfo.payload
3081
        if not isinstance(pnhvinfo.get("memory_free", None), int):
3082
          self.warn.append("Node data from primary node %s doesn't contain"
3083
                           " free memory information" %
3084
                           self.cfg.GetNodeName(pnode_uuid))
3085
        elif instance_info.fail_msg:
3086
          self.warn.append("Can't get instance runtime information: %s" %
3087
                           instance_info.fail_msg)
3088
        else:
3089
          if instance_info.payload:
3090
            current_mem = int(instance_info.payload["memory"])
3091
          else:
3092
            # Assume instance not running
3093
            # (there is a slight race condition here, but it's not very
3094
            # probable, and we have no other way to check)
3095
            # TODO: Describe race condition
3096
            current_mem = 0
3097
          #TODO(dynmem): do the appropriate check involving MINMEM
3098
          miss_mem = (be_new[constants.BE_MAXMEM] - current_mem -
3099
                      pnhvinfo["memory_free"])
3100
          if miss_mem > 0:
3101
            raise errors.OpPrereqError("This change will prevent the instance"
3102
                                       " from starting, due to %d MB of memory"
3103
                                       " missing on its primary node" %
3104
                                       miss_mem, errors.ECODE_NORES)
3105

    
3106
      if be_new[constants.BE_AUTO_BALANCE]:
3107
        for node_uuid, nres in nodeinfo.items():
3108
          if node_uuid not in self.instance.secondary_nodes:
3109
            continue
3110
          nres.Raise("Can't get info from secondary node %s" %
3111
                     self.cfg.GetNodeName(node_uuid), prereq=True,
3112
                     ecode=errors.ECODE_STATE)
3113
          (_, _, (nhvinfo, )) = nres.payload
3114
          if not isinstance(nhvinfo.get("memory_free", None), int):
3115
            raise errors.OpPrereqError("Secondary node %s didn't return free"
3116
                                       " memory information" %
3117
                                       self.cfg.GetNodeName(node_uuid),
3118
                                       errors.ECODE_STATE)
3119
          #TODO(dynmem): do the appropriate check involving MINMEM
3120
          elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
3121
            raise errors.OpPrereqError("This change will prevent the instance"
3122
                                       " from failover to its secondary node"
3123
                                       " %s, due to not enough memory" %
3124
                                       self.cfg.GetNodeName(node_uuid),
3125
                                       errors.ECODE_STATE)
3126

    
3127
    if self.op.runtime_mem:
3128
      remote_info = self.rpc.call_instance_info(
3129
         self.instance.primary_node, self.instance.name,
3130
         self.instance.hypervisor,
3131
         cluster_hvparams)
3132
      remote_info.Raise("Error checking node %s" %
3133
                        self.cfg.GetNodeName(self.instance.primary_node))
3134
      if not remote_info.payload: # not running already
3135
        raise errors.OpPrereqError("Instance %s is not running" %
3136
                                   self.instance.name, errors.ECODE_STATE)
3137

    
3138
      current_memory = remote_info.payload["memory"]
3139
      if (not self.op.force and
3140
           (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
3141
            self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
3142
        raise errors.OpPrereqError("Instance %s must have memory between %d"
3143
                                   " and %d MB of memory unless --force is"
3144
                                   " given" %
3145
                                   (self.instance.name,
3146
                                    self.be_proposed[constants.BE_MINMEM],
3147
                                    self.be_proposed[constants.BE_MAXMEM]),
3148
                                   errors.ECODE_INVAL)
3149

    
3150
      delta = self.op.runtime_mem - current_memory
3151
      if delta > 0:
3152
        CheckNodeFreeMemory(
3153
            self, self.instance.primary_node,
3154
            "ballooning memory for instance %s" % self.instance.name, delta,
3155
            self.instance.hypervisor,
3156
            self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])
3157

    
3158
    # make self.cluster visible in the functions below
3159
    cluster = self.cluster
3160

    
3161
    def _PrepareNicCreate(_, params, private):
3162
      self._PrepareNicModification(params, private, None, None,
3163
                                   {}, cluster, pnode_uuid)
3164
      return (None, None)
3165

    
3166
    def _PrepareNicMod(_, nic, params, private):
3167
      self._PrepareNicModification(params, private, nic.ip, nic.network,
3168
                                   nic.nicparams, cluster, pnode_uuid)
3169
      return None
3170

    
3171
    def _PrepareNicRemove(_, params, __):
3172
      ip = params.ip
3173
      net = params.network
3174
      if net is not None and ip is not None:
3175
        self.cfg.ReleaseIp(net, ip, self.proc.GetECId())
3176

    
3177
    # Verify NIC changes (operating on copy)
3178
    nics = self.instance.nics[:]
3179
    _ApplyContainerMods("NIC", nics, None, self.nicmod,
3180
                        _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
3181
    if len(nics) > constants.MAX_NICS:
3182
      raise errors.OpPrereqError("Instance has too many network interfaces"
3183
                                 " (%d), cannot add more" % constants.MAX_NICS,
3184
                                 errors.ECODE_STATE)
3185

    
3186
    # Pre-compute NIC changes (necessary to use result in hooks)
3187
    self._nic_chgdesc = []
3188
    if self.nicmod:
3189
      # Operate on copies as this is still in prereq
3190
      nics = [nic.Copy() for nic in self.instance.nics]
3191
      _ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
3192
                          self._CreateNewNic, self._ApplyNicMods,
3193
                          self._RemoveNic)
3194
      # Verify that NIC names are unique and valid
3195
      utils.ValidateDeviceNames("NIC", nics)
3196
      self._new_nics = nics
3197
      ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
3198
    else:
3199
      self._new_nics = None
3200
      ispec[constants.ISPEC_NIC_COUNT] = len(self.instance.nics)
3201

    
3202
    if not self.op.ignore_ipolicy:
3203
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
3204
                                                              group_info)
3205

    
3206
      # Fill ispec with backend parameters
3207
      ispec[constants.ISPEC_SPINDLE_USE] = \
3208
        self.be_new.get(constants.BE_SPINDLE_USE, None)
3209
      ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS,
3210
                                                         None)
3211

    
3212
      # Copy ispec to verify parameters with min/max values separately
3213
      if self.op.disk_template:
3214
        new_disk_template = self.op.disk_template
3215
      else:
3216
        new_disk_template = self.instance.disk_template
3217
      ispec_max = ispec.copy()
3218
      ispec_max[constants.ISPEC_MEM_SIZE] = \
3219
        self.be_new.get(constants.BE_MAXMEM, None)
3220
      res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,
3221
                                                     new_disk_template)
3222
      ispec_min = ispec.copy()
3223
      ispec_min[constants.ISPEC_MEM_SIZE] = \
3224
        self.be_new.get(constants.BE_MINMEM, None)
3225
      res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,
3226
                                                     new_disk_template)
3227

    
3228
      if (res_max or res_min):
3229
        # FIXME: Improve error message by including information about whether
3230
        # the upper or lower limit of the parameter fails the ipolicy.
3231
        msg = ("Instance allocation to group %s (%s) violates policy: %s" %
3232
               (group_info, group_info.name,
3233
                utils.CommaJoin(set(res_max + res_min))))
3234
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
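  # Illustrative example (limits are hypothetical): if the group's ipolicy
  # caps memory at 4096 MB and the requested BE_MAXMEM is 8192, res_max above
  # contains a memory size violation and the operation is rejected; res_min
  # runs the same check against BE_MINMEM.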
3235

    
3236
  def _ConvertPlainToDrbd(self, feedback_fn):
3237
    """Converts an instance from plain to drbd.
3238

3239
    """
3240
    feedback_fn("Converting template to drbd")
3241
    pnode_uuid = self.instance.primary_node
3242
    snode_uuid = self.op.remote_node_uuid
3243

    
3244
    assert self.instance.disk_template == constants.DT_PLAIN
3245

    
3246
    # create a fake disk info for _GenerateDiskTemplate
3247
    disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
3248
                  constants.IDISK_VG: d.logical_id[0],
3249
                  constants.IDISK_NAME: d.name}
3250
                 for d in self.instance.disks]
3251
    new_disks = GenerateDiskTemplate(self, self.op.disk_template,
3252
                                     self.instance.uuid, pnode_uuid,
3253
                                     [snode_uuid], disk_info, None, None, 0,
3254
                                     feedback_fn, self.diskparams)
3255
    anno_disks = rpc.AnnotateDiskParams(new_disks, self.diskparams)
3256
    p_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, pnode_uuid)
3257
    s_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, snode_uuid)
3258
    info = GetInstanceInfoText(self.instance)
3259
    feedback_fn("Creating additional volumes...")
3260
    # first, create the missing data and meta devices
3261
    for disk in anno_disks:
3262
      # unfortunately this is... not too nice
3263
      CreateSingleBlockDev(self, pnode_uuid, self.instance, disk.children[1],
3264
                           info, True, p_excl_stor)
3265
      for child in disk.children:
3266
        CreateSingleBlockDev(self, snode_uuid, self.instance, child, info, True,
3267
                             s_excl_stor)
3268
    # at this stage, all new LVs have been created, we can rename the
3269
    # old ones
3270
    feedback_fn("Renaming original volumes...")
3271
    rename_list = [(o, n.children[0].logical_id)
3272
                   for (o, n) in zip(self.instance.disks, new_disks)]
3273
    result = self.rpc.call_blockdev_rename(pnode_uuid, rename_list)
3274
    result.Raise("Failed to rename original LVs")
3275

    
3276
    feedback_fn("Initializing DRBD devices...")
3277
    # all child devices are in place, we can now create the DRBD devices
3278
    try:
3279
      for disk in anno_disks:
3280
        for (node_uuid, excl_stor) in [(pnode_uuid, p_excl_stor),
3281
                                       (snode_uuid, s_excl_stor)]:
3282
          f_create = node_uuid == pnode_uuid
3283
          CreateSingleBlockDev(self, node_uuid, self.instance, disk, info,
3284
                               f_create, excl_stor)
3285
    except errors.GenericError, e:
3286
      feedback_fn("Initializing of DRBD devices failed;"
3287
                  " renaming back original volumes...")
3288
      rename_back_list = [(n.children[0], o.logical_id)
3289
                          for (n, o) in zip(new_disks, self.instance.disks)]
3290
      result = self.rpc.call_blockdev_rename(pnode_uuid, rename_back_list)
3291
      result.Raise("Failed to rename LVs back after error %s" % str(e))
3292
      raise
3293

    
3294
    # at this point, the instance has been modified
3295
    self.instance.disk_template = constants.DT_DRBD8
3296
    self.instance.disks = new_disks
3297
    self.cfg.Update(self.instance, feedback_fn)
3298

    
3299
    # Release node locks while waiting for sync
3300
    ReleaseLocks(self, locking.LEVEL_NODE)
3301

    
3302
    # disks are created, waiting for sync
3303
    disk_abort = not WaitForSync(self, self.instance,
3304
                                 oneshot=not self.op.wait_for_sync)
3305
    if disk_abort:
3306
      raise errors.OpExecError("There are some degraded disks for"
3307
                               " this instance, please cleanup manually")
3308

    
3309
    # Node resource locks will be released by caller
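  # Sketch of the conversion above: for every disk a metadata LV is created
  # on the primary and both data and metadata LVs on the secondary, the
  # existing plain LV on the primary is renamed to the name expected as the
  # DRBD data child, and only then are the DRBD8 devices themselves created
  # on both nodes before waiting for the initial sync.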
3310

    
3311
  def _ConvertDrbdToPlain(self, feedback_fn):
3312
    """Converts an instance from drbd to plain.
3313

3314
    """
3315
    assert len(self.instance.secondary_nodes) == 1
3316
    assert self.instance.disk_template == constants.DT_DRBD8
3317

    
3318
    pnode_uuid = self.instance.primary_node
3319
    snode_uuid = self.instance.secondary_nodes[0]
3320
    feedback_fn("Converting template to plain")
3321

    
3322
    old_disks = AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
3323
    new_disks = [d.children[0] for d in self.instance.disks]
3324

    
3325
    # copy over size, mode and name
3326
    for parent, child in zip(old_disks, new_disks):
3327
      child.size = parent.size
3328
      child.mode = parent.mode
3329
      child.name = parent.name
3330

    
3331
    # this is a DRBD disk, return its port to the pool
3332
    # NOTE: this must be done right before the call to cfg.Update!
3333
    for disk in old_disks:
3334
      tcp_port = disk.logical_id[2]
3335
      self.cfg.AddTcpUdpPort(tcp_port)
3336

    
3337
    # update instance structure
3338
    self.instance.disks = new_disks
3339
    self.instance.disk_template = constants.DT_PLAIN
3340
    _UpdateIvNames(0, self.instance.disks)
3341
    self.cfg.Update(self.instance, feedback_fn)
3342

    
3343
    # Release locks in case removing disks takes a while
3344
    ReleaseLocks(self, locking.LEVEL_NODE)
3345

    
3346
    feedback_fn("Removing volumes on the secondary node...")
3347
    for disk in old_disks:
3348
      result = self.rpc.call_blockdev_remove(snode_uuid, (disk, self.instance))
3349
      result.Warn("Could not remove block device %s on node %s,"
3350
                  " continuing anyway" %
3351
                  (disk.iv_name, self.cfg.GetNodeName(snode_uuid)),
3352
                  self.LogWarning)
3353

    
3354
    feedback_fn("Removing unneeded volumes on the primary node...")
3355
    for idx, disk in enumerate(old_disks):
3356
      meta = disk.children[1]
3357
      result = self.rpc.call_blockdev_remove(pnode_uuid, (meta, self.instance))
3358
      result.Warn("Could not remove metadata for disk %d on node %s,"
3359
                  " continuing anyway" %
3360
                  (idx, self.cfg.GetNodeName(pnode_uuid)),
3361
                  self.LogWarning)
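  # Descriptive note: after this conversion the instance's disks are the
  # former DRBD data LVs on the primary node; the metadata LVs on the primary
  # and all volumes on the former secondary are removed, and the DRBD TCP
  # ports are returned to the cluster pool for reuse.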
3362

    
3363
  def _HotplugDevice(self, action, dev_type, device, extra, seq):
3364
    self.LogInfo("Trying to hotplug device...")
3365
    msg = "hotplug:"
3366
    result = self.rpc.call_hotplug_device(self.instance.primary_node,
3367
                                          self.instance, action, dev_type,
3368
                                          (device, self.instance),
3369
                                          extra, seq)
3370
    if result.fail_msg:
3371
      self.LogWarning("Could not hotplug device: %s" % result.fail_msg)
3372
      self.LogInfo("Continuing execution..")
3373
      msg += "failed"
3374
    else:
3375
      self.LogInfo("Hotplug done.")
3376
      msg += "done"
3377
    return msg
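  # Illustrative example: a successful NIC hotplug makes the callers below
  # record a change entry such as ("nic.0", "hotplug:done"), while a failure
  # yields "hotplug:failed" and only logs a warning instead of aborting the
  # whole modification.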
3378

    
3379
  def _CreateNewDisk(self, idx, params, _):
3380
    """Creates a new disk.
3381

3382
    """
3383
    # add a new disk
3384
    if self.instance.disk_template in constants.DTS_FILEBASED:
3385
      (file_driver, file_path) = self.instance.disks[0].logical_id
3386
      file_path = os.path.dirname(file_path)
3387
    else:
3388
      file_driver = file_path = None
3389

    
3390
    disk = \
3391
      GenerateDiskTemplate(self, self.instance.disk_template,
3392
                           self.instance.uuid, self.instance.primary_node,
3393
                           self.instance.secondary_nodes, [params], file_path,
3394
                           file_driver, idx, self.Log, self.diskparams)[0]
3395

    
3396
    new_disks = CreateDisks(self, self.instance, disks=[disk])
3397

    
3398
    if self.cluster.prealloc_wipe_disks:
3399
      # Wipe new disk
3400
      WipeOrCleanupDisks(self, self.instance,
3401
                         disks=[(idx, disk, 0)],
3402
                         cleanup=new_disks)
3403

    
3404
    changes = [
3405
      ("disk/%d" % idx,
3406
       "add:size=%s,mode=%s" % (disk.size, disk.mode)),
3407
      ]
3408
    if self.op.hotplug:
3409
      result = self.rpc.call_blockdev_assemble(self.instance.primary_node,
3410
                                               (disk, self.instance),
3411
                                               self.instance.name, True, idx)
3412
      if result.fail_msg:
3413
        changes.append(("disk/%d" % idx, "assemble:failed"))
3414
        self.LogWarning("Can't assemble newly created disk %d: %s",
3415
                        idx, result.fail_msg)
3416
      else:
3417
        _, link_name = result.payload
3418
        msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
3419
                                  constants.HOTPLUG_TARGET_DISK,
3420
                                  disk, link_name, idx)
3421
        changes.append(("disk/%d" % idx, msg))
3422

    
3423
    return (disk, changes)
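  # Illustrative example (size and mode are hypothetical): adding a 1024 MB
  # disk at index 1 produces change entries such as
  #   ("disk/1", "add:size=1024,mode=rw")
  # plus, when hotplugging, either an "assemble:failed" entry or the hotplug
  # result for the same "disk/1" key.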
3424

    
3425
  def _PostAddDisk(self, _, disk):
3426
    if not WaitForSync(self, self.instance, disks=[disk],
3427
                       oneshot=not self.op.wait_for_sync):
3428
      raise errors.OpExecError("Failed to sync disks of %s" %
3429
                               self.instance.name)
3430

    
3431
    # the disk is active at this point, so deactivate it if the instance disks
3432
    # are supposed to be inactive
3433
    if not self.instance.disks_active:
3434
      ShutdownInstanceDisks(self, self.instance, disks=[disk])
3435

    
3436
  def _ModifyDisk(self, idx, disk, params, _):
3437
    """Modifies a disk.
3438

3439
    """
3440
    changes = []
3441
    if constants.IDISK_MODE in params:
3442
      disk.mode = params.get(constants.IDISK_MODE)
3443
      changes.append(("disk.mode/%d" % idx, disk.mode))
3444

    
3445
    if constants.IDISK_NAME in params:
3446
      disk.name = params.get(constants.IDISK_NAME)
3447
      changes.append(("disk.name/%d" % idx, disk.name))
3448

    
3449
    # Modify arbitrary params in case instance template is ext
3450
    for key, value in params.iteritems():
3451
      if (key not in constants.MODIFIABLE_IDISK_PARAMS and
3452
          self.instance.disk_template == constants.DT_EXT):
3453
        # stolen from GetUpdatedParams: default means reset/delete
3454
        if value.lower() == constants.VALUE_DEFAULT:
3455
          try:
3456
            del disk.params[key]
3457
          except KeyError:
3458
            pass
3459
        else:
3460
          disk.params[key] = value
3461
        changes.append(("disk.params:%s/%d" % (key, idx), value))
3462

    
3463
    return changes
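  # Illustrative example (the parameter name is hypothetical): for a DT_EXT
  # instance a modification {"foo_param": "bar"} on disk 0 is stored in
  # disk.params and reported as ("disk.params:foo_param/0", "bar"), while the
  # special value "default" removes the key instead.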
3464

    
3465
  def _RemoveDisk(self, idx, root, _):
3466
    """Removes a disk.
3467

3468
    """
3469
    hotmsg = ""
3470
    if self.op.hotplug:
3471
      hotmsg = self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
3472
                                   constants.HOTPLUG_TARGET_DISK,
3473
                                   root, None, idx)
3474
      ShutdownInstanceDisks(self, self.instance, [root])
3475

    
3476
    (anno_disk,) = AnnotateDiskParams(self.instance, [root], self.cfg)
3477
    for node_uuid, disk in anno_disk.ComputeNodeTree(
3478
                             self.instance.primary_node):
3479
      msg = self.rpc.call_blockdev_remove(node_uuid, (disk, self.instance)) \
3480
              .fail_msg
3481
      if msg:
3482
        self.LogWarning("Could not remove disk/%d on node '%s': %s,"
3483
                        " continuing anyway", idx,
3484
                        self.cfg.GetNodeName(node_uuid), msg)
3485

    
3486
    # if this is a DRBD disk, return its port to the pool
3487
    if root.dev_type in constants.DTS_DRBD:
3488
      self.cfg.AddTcpUdpPort(root.logical_id[2])
3489

    
3490
    return hotmsg
3491

    
3492
  def _CreateNewNic(self, idx, params, private):
3493
    """Creates data structure for a new network interface.
3494

3495
    """
3496
    mac = params[constants.INIC_MAC]
3497
    ip = params.get(constants.INIC_IP, None)
3498
    net = params.get(constants.INIC_NETWORK, None)
3499
    name = params.get(constants.INIC_NAME, None)
3500
    net_uuid = self.cfg.LookupNetwork(net)
3501
    #TODO: not private.filled?? can a nic have no nicparams??
3502
    nicparams = private.filled
3503
    nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name,
3504
                       nicparams=nicparams)
3505
    nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
3506

    
3507
    changes = [
3508
      ("nic.%d" % idx,
3509
       "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
3510
       (mac, ip, private.filled[constants.NIC_MODE],
3511
       private.filled[constants.NIC_LINK], net)),
3512
      ]
3513

    
3514
    if self.op.hotplug:
3515
      msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
3516
                                constants.HOTPLUG_TARGET_NIC,
3517
                                nobj, None, idx)
3518
      changes.append(("nic.%d" % idx, msg))
3519

    
3520
    return (nobj, changes)
3521

    
3522
  def _ApplyNicMods(self, idx, nic, params, private):
3523
    """Modifies a network interface.
3524

3525
    """
3526
    changes = []
3527

    
3528
    for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:
3529
      if key in params:
3530
        changes.append(("nic.%s/%d" % (key, idx), params[key]))
3531
        setattr(nic, key, params[key])
3532

    
3533
    new_net = params.get(constants.INIC_NETWORK, nic.network)
3534
    new_net_uuid = self.cfg.LookupNetwork(new_net)
3535
    if new_net_uuid != nic.network:
3536
      changes.append(("nic.network/%d" % idx, new_net))
3537
      nic.network = new_net_uuid
3538

    
3539
    if private.filled:
3540
      nic.nicparams = private.filled
3541

    
3542
      for (key, val) in nic.nicparams.items():
3543
        changes.append(("nic.%s/%d" % (key, idx), val))
3544

    
3545
    if self.op.hotplug:
3546
      msg = self._HotplugDevice(constants.HOTPLUG_ACTION_MODIFY,
3547
                                constants.HOTPLUG_TARGET_NIC,
3548
                                nic, None, idx)
3549
      changes.append(("nic/%d" % idx, msg))
3550

    
3551
    return changes
3552

    
3553
  def _RemoveNic(self, idx, nic, _):
3554
    if self.op.hotplug:
3555
      return self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
3556
                                 constants.HOTPLUG_TARGET_NIC,
3557
                                 nic, None, idx)
3558

    
3559
  def Exec(self, feedback_fn):
3560
    """Modifies an instance.
3561

3562
    All parameters take effect only at the next restart of the instance.
3563

3564
    """
3565
    # Process here the warnings from CheckPrereq, as we don't have a
3566
    # feedback_fn there.
3567
    # TODO: Replace with self.LogWarning
3568
    for warn in self.warn:
3569
      feedback_fn("WARNING: %s" % warn)
3570

    
3571
    assert ((self.op.disk_template is None) ^
3572
            bool(self.owned_locks(locking.LEVEL_NODE_RES))), \
3573
      "Not owning any node resource locks"
3574

    
3575
    result = []
3576

    
3577
    # New primary node
3578
    if self.op.pnode_uuid:
3579
      self.instance.primary_node = self.op.pnode_uuid
3580

    
3581
    # runtime memory
3582
    if self.op.runtime_mem:
3583
      rpcres = self.rpc.call_instance_balloon_memory(self.instance.primary_node,
3584
                                                     self.instance,
3585
                                                     self.op.runtime_mem)
3586
      rpcres.Raise("Cannot modify instance runtime memory")
3587
      result.append(("runtime_memory", self.op.runtime_mem))
3588

    
3589
    # Apply disk changes
3590
    _ApplyContainerMods("disk", self.instance.disks, result, self.diskmod,
3591
                        self._CreateNewDisk, self._ModifyDisk,
3592
                        self._RemoveDisk, post_add_fn=self._PostAddDisk)
3593
    _UpdateIvNames(0, self.instance.disks)
3594

    
3595
    if self.op.disk_template:
3596
      if __debug__:
3597
        check_nodes = set(self.instance.all_nodes)
3598
        if self.op.remote_node_uuid:
3599
          check_nodes.add(self.op.remote_node_uuid)
3600
        for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
3601
          owned = self.owned_locks(level)
3602
          assert not (check_nodes - owned), \
3603
            ("Not owning the correct locks, owning %r, expected at least %r" %
3604
             (owned, check_nodes))
3605

    
3606
      r_shut = ShutdownInstanceDisks(self, self.instance)
3607
      if not r_shut:
3608
        raise errors.OpExecError("Cannot shutdown instance disks, unable to"
3609
                                 " proceed with disk template conversion")
3610
      mode = (self.instance.disk_template, self.op.disk_template)
3611
      try:
3612
        self._DISK_CONVERSIONS[mode](self, feedback_fn)
3613
      except:
3614
        self.cfg.ReleaseDRBDMinors(self.instance.uuid)
3615
        raise
3616
      result.append(("disk_template", self.op.disk_template))
3617

    
3618
      assert self.instance.disk_template == self.op.disk_template, \
3619
        ("Expected disk template '%s', found '%s'" %
3620
         (self.op.disk_template, self.instance.disk_template))
3621

    
3622
    # Release node and resource locks if there are any (they might already have
3623
    # been released during disk conversion)
3624
    ReleaseLocks(self, locking.LEVEL_NODE)
3625
    ReleaseLocks(self, locking.LEVEL_NODE_RES)
3626

    
3627
    # Apply NIC changes
3628
    if self._new_nics is not None:
3629
      self.instance.nics = self._new_nics
3630
      result.extend(self._nic_chgdesc)
3631

    
3632
    # hvparams changes
3633
    if self.op.hvparams:
3634
      self.instance.hvparams = self.hv_inst
3635
      for key, val in self.op.hvparams.iteritems():
3636
        result.append(("hv/%s" % key, val))
3637

    
3638
    # beparams changes
3639
    if self.op.beparams:
3640
      self.instance.beparams = self.be_inst
3641
      for key, val in self.op.beparams.iteritems():
3642
        result.append(("be/%s" % key, val))
3643

    
3644
    # OS change
3645
    if self.op.os_name:
3646
      self.instance.os = self.op.os_name
3647

    
3648
    # osparams changes
3649
    if self.op.osparams:
3650
      self.instance.osparams = self.os_inst
3651
      for key, val in self.op.osparams.iteritems():
3652
        result.append(("os/%s" % key, val))
3653

    
3654
    if self.op.osparams_private:
3655
      self.instance.osparams_private = self.os_inst_private
3656
      for key, val in self.op.osparams_private.iteritems():
3657
        # Show the Private(...) blurb.
3658
        result.append(("os_private/%s" % key, repr(val)))
3659

    
3660
    if self.op.offline is None:
3661
      # Ignore
3662
      pass
3663
    elif self.op.offline:
3664
      # Mark instance as offline
3665
      self.cfg.MarkInstanceOffline(self.instance.uuid)
3666
      result.append(("admin_state", constants.ADMINST_OFFLINE))
3667
    else:
3668
      # Mark instance as online, but stopped
3669
      self.cfg.MarkInstanceDown(self.instance.uuid)
3670
      result.append(("admin_state", constants.ADMINST_DOWN))
3671

    
3672
    self.cfg.Update(self.instance, feedback_fn, self.proc.GetECId())
3673

    
3674
    assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
3675
                self.owned_locks(locking.LEVEL_NODE)), \
3676
      "All node locks should have been released by now"
3677

    
3678
    return result
3679

    
3680
  _DISK_CONVERSIONS = {
3681
    (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
3682
    (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
3683
    }
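  # Descriptive note: Exec dispatches template conversions through this
  # mapping, so only plain <-> drbd conversions are implemented here;
  # _PreCheckDiskTemplate rejects any other (old, new) template pair before
  # execution starts.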
3684

    
3685

    
3686
class LUInstanceChangeGroup(LogicalUnit):
3687
  HPATH = "instance-change-group"
3688
  HTYPE = constants.HTYPE_INSTANCE
3689
  REQ_BGL = False
3690

    
3691
  def ExpandNames(self):
    self.share_locks = ShareAll()

    self.needed_locks = {
      locking.LEVEL_NODEGROUP: [],
      locking.LEVEL_NODE: [],
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
      }

    self._ExpandAndLockInstance()

    if self.op.target_groups:
      self.req_target_uuids = map(self.cfg.LookupNodeGroup,
                                  self.op.target_groups)
    else:
      self.req_target_uuids = None

    self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)

  def DeclareLocks(self, level):
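    """Declare node group and node locks, depending on the locking level.

    """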
    if level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]

      if self.req_target_uuids:
        lock_groups = set(self.req_target_uuids)

        # Lock all groups used by instance optimistically; this requires going
        # via the node before it's locked, requiring verification later on
        instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
        lock_groups.update(instance_groups)
      else:
        # No target groups, need to lock all of them
        lock_groups = locking.ALL_SET

      self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups

    elif level == locking.LEVEL_NODE:
      if self.req_target_uuids:
        # Lock all nodes used by instances
        self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
        self._LockInstancesNodes()

        # Lock all nodes in all potential target groups
        lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
                       self.cfg.GetInstanceNodeGroups(self.op.instance_uuid))
        member_nodes = [node_uuid
                        for group in lock_groups
                        for node_uuid in self.cfg.GetNodeGroup(group).members]
        self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
      else:
        # Lock all nodes as all groups are potential targets
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
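    """Check prerequisites.

    This verifies that the locked instance, nodes and node groups are
    still consistent and computes the final set of target groups.

    """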
    owned_instance_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))

    assert (self.req_target_uuids is None or
            owned_groups.issuperset(self.req_target_uuids))
    assert owned_instance_names == set([self.op.instance_name])

    # Get instance information
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)

    # Check if node groups for locked instance are still correct
    assert owned_nodes.issuperset(self.instance.all_nodes), \
      ("Instance %s's nodes changed while we kept the lock" %
       self.op.instance_name)

    inst_groups = CheckInstanceNodeGroups(self.cfg, self.op.instance_uuid,
                                          owned_groups)

    if self.req_target_uuids:
      # User requested specific target groups
      self.target_uuids = frozenset(self.req_target_uuids)
    else:
      # All groups except those used by the instance are potential targets
      self.target_uuids = owned_groups - inst_groups

    conflicting_groups = self.target_uuids & inst_groups
    if conflicting_groups:
      raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
                                 " used by the instance '%s'" %
                                 (utils.CommaJoin(conflicting_groups),
                                  self.op.instance_name),
                                 errors.ECODE_INVAL)

    if not self.target_uuids:
      raise errors.OpPrereqError("There are no possible target groups",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    assert self.target_uuids

    env = {
      "TARGET_GROUPS": " ".join(self.target_uuids),
      }

    env.update(BuildInstanceHookEnvByObject(self, self.instance))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()
    return ([mn], [mn])

  def Exec(self, feedback_fn):
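    """Run the instance allocator and return the resulting jobs.

    """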
    instances = list(self.owned_locks(locking.LEVEL_INSTANCE))

    assert instances == [self.op.instance_name], "Instance not locked"

    req = iallocator.IAReqGroupChange(instances=instances,
                                      target_groups=list(self.target_uuids))
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute solution for changing group of"
                                 " instance '%s' using iallocator '%s': %s" %
                                 (self.op.instance_name, self.op.iallocator,
                                  ial.info), errors.ECODE_NORES)

    jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False)

    self.LogInfo("Iallocator returned %s job(s) for changing group of"
                 " instance '%s'", len(jobs), self.op.instance_name)

    return ResultWithJobs(jobs)