Statistics
| Branch: | Tag: | Revision:

root / lib / cmdlib / instance.py @ 81c222af

History | View | Annotate | Download (150.1 kB)

1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Logical units dealing with instances."""
23

    
24
import OpenSSL
25
import copy
26
import logging
27
import os
28

    
29
from ganeti import compat
30
from ganeti import constants
31
from ganeti import errors
32
from ganeti import ht
33
from ganeti import hypervisor
34
from ganeti import locking
35
from ganeti.masterd import iallocator
36
from ganeti import masterd
37
from ganeti import netutils
38
from ganeti import objects
39
from ganeti import pathutils
40
import ganeti.rpc.node as rpc
41
from ganeti import utils
42

    
43
from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs
44

    
45
from ganeti.cmdlib.common import INSTANCE_DOWN, \
46
  INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, CheckNodeOnline, \
47
  ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
48
  LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
49
  IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
50
  AnnotateDiskParams, GetUpdatedParams, ExpandInstanceUuidAndName, \
51
  ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeUuidAndName, \
52
  CheckDiskTemplateEnabled, IsValidDiskAccessModeCombination
53
from ganeti.cmdlib.instance_storage import CreateDisks, \
54
  CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, WaitForSync, \
55
  IsExclusiveStorageEnabledNodeUuid, CreateSingleBlockDev, ComputeDisks, \
56
  CheckRADOSFreeSpace, ComputeDiskSizePerVG, GenerateDiskTemplate, \
57
  StartInstanceDisks, ShutdownInstanceDisks, AssembleInstanceDisks, \
58
  CheckSpindlesExclusiveStorage
59
from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
60
  GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
61
  NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
62
  ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
63
  GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
64
  CheckInstanceBridgesExist, CheckNicsBridgesExist, CheckNodeHasOS
65

    
66
import ganeti.masterd.instance
67

    
68

    
69
#: Type description for changes as returned by L{_ApplyContainerMods}'s
#: callbacks: an optional list of two-element tuples, each a non-empty
#: string (the change name) paired with a value of any type
_TApplyContModsCbChanges = \
  ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
    ht.TNonEmptyString,
    ht.TAny,
    ])))
76

    
77

    
78
def _CheckHostnameSane(lu, name):
  """Ensures that a given hostname resolves to a 'sane' name.

  The given name is required to be a prefix of the resolved hostname,
  to prevent accidental mismatches.

  @param lu: the logical unit on behalf of which we're checking
  @param name: the name we should resolve and check
  @return: the resolved hostname object

  """
  hostname = netutils.GetHostname(name=name)
  resolved = hostname.name
  if resolved != name:
    # Not an error by itself, but worth telling the user about
    lu.LogInfo("Resolved given name '%s' to '%s'", name, resolved)
  if not utils.MatchNameComponent(name, [resolved]):
    raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
                                " same as given hostname '%s'") %
                               (resolved, name), errors.ECODE_INVAL)
  return hostname
97

    
98

    
99
def _CheckOpportunisticLocking(op):
100
  """Generate error if opportunistic locking is not possible.
101

102
  """
103
  if op.opportunistic_locking and not op.iallocator:
104
    raise errors.OpPrereqError("Opportunistic locking is only available in"
105
                               " combination with an instance allocator",
106
                               errors.ECODE_INVAL)
107

    
108

    
109
def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_name_whitelist):
  """Wrapper around IAReqInstanceAlloc.

  @param op: The instance opcode
  @param disks: The computed disks
  @param nics: The computed nics
  @param beparams: The full filled beparams
  @param node_name_whitelist: List of nodes which should appear as online to the
    allocator (unless the node is already marked offline)

  @returns: A filled L{iallocator.IAReqInstanceAlloc}

  """
  # Collect the request arguments first; this keeps the constructor call short
  alloc_args = {
    "name": op.instance_name,
    "disk_template": op.disk_template,
    "tags": op.tags,
    "os": op.os_type,
    "vcpus": beparams[constants.BE_VCPUS],
    "memory": beparams[constants.BE_MAXMEM],
    "spindle_use": beparams[constants.BE_SPINDLE_USE],
    "disks": disks,
    "nics": [nic.ToDict() for nic in nics],
    "hypervisor": op.hypervisor,
    "node_whitelist": node_name_whitelist,
    }
  return iallocator.IAReqInstanceAlloc(**alloc_args)
134

    
135

    
136
def _ComputeFullBeParams(op, cluster):
  """Computes the full beparams.

  Opcode values set to "auto" are replaced by the cluster-level defaults
  before the dictionary is upgraded, type-checked and filled.

  @param op: The instance opcode
  @param cluster: The cluster config object

  @return: The fully filled beparams

  """
  default_beparams = cluster.beparams[constants.PP_DEFAULT]
  # Use items() instead of iteritems() so this also works on Python 3;
  # only values (never keys) are replaced during iteration, which is
  # safe with both methods.
  for param, value in op.beparams.items():
    if value == constants.VALUE_AUTO:
      op.beparams[param] = default_beparams[param]
  objects.UpgradeBeParams(op.beparams)
  utils.ForceDictType(op.beparams, constants.BES_PARAMETER_TYPES)
  return cluster.SimpleFillBE(op.beparams)
152

    
153

    
154
def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
  """Computes the nics.

  Validates each NIC specification of the opcode (mode, network, IP,
  MAC, VLAN), fills in defaults from the cluster and builds the
  corresponding NIC objects.

  @param op: The instance opcode
  @param cluster: Cluster configuration object
  @param default_ip: The default ip to assign
  @param cfg: An instance of the configuration object
  @param ec_id: Execution context ID

  @returns: The built up nics, a list of L{objects.NIC} objects

  """
  nics = []
  for nic in op.nics:
    nic_mode_req = nic.get(constants.INIC_MODE, None)
    nic_mode = nic_mode_req
    # Missing or "auto" mode falls back to the cluster default NIC mode
    if nic_mode is None or nic_mode == constants.VALUE_AUTO:
      nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]

    net = nic.get(constants.INIC_NETWORK, None)
    link = nic.get(constants.NIC_LINK, None)
    ip = nic.get(constants.INIC_IP, None)
    vlan = nic.get(constants.INIC_VLAN, None)

    # network checks: a network is mutually exclusive with explicit
    # mode/link, since those come from the network's settings
    if net is None or net.lower() == constants.VALUE_NONE:
      net = None
    else:
      if nic_mode_req is not None or link is not None:
        raise errors.OpPrereqError("If network is given, no mode or link"
                                   " is allowed to be passed",
                                   errors.ECODE_INVAL)

    # ip validity checks
    if ip is None or ip.lower() == constants.VALUE_NONE:
      nic_ip = None
    elif ip.lower() == constants.VALUE_AUTO:
      # "auto" takes the instance's resolved address, which only exists
      # when name checking was performed
      if not op.name_check:
        raise errors.OpPrereqError("IP address set to auto but name checks"
                                   " have been skipped",
                                   errors.ECODE_INVAL)
      nic_ip = default_ip
    else:
      # We defer pool operations until later, so that the iallocator has
      # filled in the instance's node(s)
      if ip.lower() == constants.NIC_IP_POOL:
        if net is None:
          raise errors.OpPrereqError("if ip=pool, parameter network"
                                     " must be passed too",
                                     errors.ECODE_INVAL)

      elif not netutils.IPAddress.IsValid(ip):
        raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                   errors.ECODE_INVAL)

      nic_ip = ip

    # TODO: check the ip address for uniqueness
    if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
      raise errors.OpPrereqError("Routed nic mode requires an ip address",
                                 errors.ECODE_INVAL)

    # MAC address verification; explicit MACs are normalized and reserved
    # so no other instance in this execution context can claim them
    mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
    if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
      mac = utils.NormalizeAndValidateMac(mac)

      try:
        # TODO: We need to factor this out
        cfg.ReserveMAC(mac, ec_id)
      except errors.ReservationError:
        raise errors.OpPrereqError("MAC address %s already in use"
                                   " in cluster" % mac,
                                   errors.ECODE_NOTUNIQUE)

    # Build nic parameters; only explicitly requested values are stored,
    # the rest stays at the cluster defaults
    nicparams = {}
    if nic_mode_req:
      nicparams[constants.NIC_MODE] = nic_mode
    if link:
      nicparams[constants.NIC_LINK] = link
    if vlan:
      nicparams[constants.NIC_VLAN] = vlan

    # Validate the fully-filled parameter set before creating the object
    check_params = cluster.SimpleFillNIC(nicparams)
    objects.NIC.CheckParameterSyntax(check_params)
    net_uuid = cfg.LookupNetwork(net)
    name = nic.get(constants.INIC_NAME, None)
    if name is not None and name.lower() == constants.VALUE_NONE:
      name = None
    nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name,
                          network=net_uuid, nicparams=nicparams)
    nic_obj.uuid = cfg.GenerateUniqueID(ec_id)
    nics.append(nic_obj)

  return nics
249

    
250

    
251
def _CheckForConflictingIp(lu, ip, node_uuid):
252
  """In case of conflicting IP address raise error.
253

254
  @type ip: string
255
  @param ip: IP address
256
  @type node_uuid: string
257
  @param node_uuid: node UUID
258

259
  """
260
  (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node_uuid)
261
  if conf_net is not None:
262
    raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
263
                                " network %s, but the target NIC does not." %
264
                                (ip, conf_net)),
265
                               errors.ECODE_STATE)
266

    
267
  return (None, None)
268

    
269

    
270
def _ComputeIPolicyInstanceSpecViolation(
  ipolicy, instance_spec, disk_template,
  _compute_fn=ComputeIPolicySpecViolation):
  """Compute if instance specs meets the specs of ipolicy.

  @type ipolicy: dict
  @param ipolicy: The ipolicy to verify against
  @type instance_spec: dict
  @param instance_spec: The instance spec to verify
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{ComputeIPolicySpecViolation}

  """
  # Values absent from the spec fall back to None (sizes) or 0/[] (counts)
  mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
  cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
  disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
  disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
  nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
  spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)

  return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
                     disk_sizes, spindle_use, disk_template)
294

    
295

    
296
def _CheckOSVariant(os_obj, name):
  """Check whether an OS name conforms to the os variants specification.

  @type os_obj: L{objects.OS}
  @param os_obj: OS object to check
  @type name: string
  @param name: OS name passed by the user, to check for validity

  """
  variant = objects.OS.GetVariant(name)
  supported = os_obj.supported_variants

  if not supported:
    # This OS has no variants, so none may be given
    if variant:
      raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
                                 " passed)" % (os_obj.name, variant),
                                 errors.ECODE_INVAL)
  elif not variant:
    raise errors.OpPrereqError("OS name must include a variant",
                               errors.ECODE_INVAL)
  elif variant not in supported:
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)
318

    
319

    
320
class LUInstanceCreate(LogicalUnit):
  """Create an instance.

  """
  # Hooks directory name and object type used when running instance hooks
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  # Fine-grained locks are declared in ExpandNames instead of requiring
  # the big ganeti lock
  REQ_BGL = False
327

    
328
  def _CheckDiskTemplateValid(self):
    """Checks validity of disk template.

    Defaults a missing template to the first enabled one, then verifies
    that the (possibly defaulted) template is enabled on the cluster.

    """
    cluster = self.cfg.GetClusterInfo()
    template = self.op.disk_template
    if template is None:
      # FIXME: It would be better to take the default disk template from the
      # ipolicy, but for the ipolicy we need the primary node, which we get
      # from the iallocator, which wants the disk template as input. To solve
      # this chicken-and-egg problem, it should be possible to specify just a
      # node group from the iallocator and take the ipolicy from that.
      template = cluster.enabled_disk_templates[0]
      self.op.disk_template = template
    CheckDiskTemplateEnabled(cluster, template)
341

    
342
  def _CheckDiskArguments(self):
    """Checks validity of disk-related arguments.

    """
    # check that disk's names are unique and valid
    utils.ValidateDeviceNames("disk", self.op.disks)

    self._CheckDiskTemplateValid()

    # check disk parameter types; the "ext" template allows arbitrary
    # extra parameters, so it is exempt
    if self.op.disk_template != constants.DT_EXT:
      for disk in self.op.disks:
        utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)

    # all disks must agree on whether they are adopted
    has_adopt = any(constants.IDISK_ADOPT in disk
                    for disk in self.op.disks)
    has_no_adopt = any(constants.IDISK_ADOPT not in disk
                       for disk in self.op.disks)
    if has_adopt and has_no_adopt:
      raise errors.OpPrereqError("Either all disks are adopted or none is",
                                 errors.ECODE_INVAL)

    if has_adopt:
      if self.op.disk_template not in constants.DTS_MAY_ADOPT:
        raise errors.OpPrereqError("Disk adoption is not supported for the"
                                   " '%s' disk template" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)
      if self.op.iallocator is not None:
        raise errors.OpPrereqError("Disk adoption not allowed with an"
                                   " iallocator script", errors.ECODE_INVAL)
      if self.op.mode == constants.INSTANCE_IMPORT:
        raise errors.OpPrereqError("Disk adoption not allowed for"
                                   " instance import", errors.ECODE_INVAL)
    elif self.op.disk_template in constants.DTS_MUST_ADOPT:
      raise errors.OpPrereqError("Disk template %s requires disk adoption,"
                                 " but no 'adopt' parameter given" %
                                 self.op.disk_template,
                                 errors.ECODE_INVAL)

    self.adopt_disks = has_adopt
383

    
384
  def _CheckVLANArguments(self):
    """ Check validity of VLANs if given

    Accepted forms: a bare number (normalized to ".N" for an untagged
    access port), ".N" (untagged), and ".N:M:..." or ":M:..." (trunks of
    tagged VLANs, optionally with one untagged VLAN after the dot).

    """
    for nic in self.op.nics:
      vlan = nic.get(constants.INIC_VLAN, None)
      if not vlan:
        continue
      if vlan.isdigit():
        # Simplest case: a plain number means an untagged access port;
        # the leading dot needs to be added
        nic[constants.INIC_VLAN] = "." + vlan
        continue
      if vlan[0] == "." and vlan[1:].isdigit():
        # Single untagged VLAN, already in canonical form
        continue
      if vlan[0] in (".", ":"):
        # "." may be followed by a trunk, ":" is a tagged-only trunk;
        # every component must be numeric
        if all(tag.isdigit() for tag in vlan[1:].split(":")):
          continue
      raise errors.OpPrereqError("Specified VLAN parameter is invalid"
                                 " : %s" % vlan, errors.ECODE_INVAL)
415

    
416
  def CheckArguments(self):
    """Check arguments.

    Performs syntactic validation and normalization of the opcode: the
    instance name, NICs, disks, node/iallocator options and the
    mode-specific (create/import/remote-import) parameters.

    """
    # do not require name_check to ease forward/backward compatibility
    # for tools
    if self.op.no_install and self.op.start:
      self.LogInfo("No-installation mode selected, disabling startup")
      self.op.start = False
    # validate/normalize the instance name
    self.op.instance_name = \
      netutils.Hostname.GetNormalizedName(self.op.instance_name)

    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("Cannot do IP address check without a name"
                                 " check", errors.ECODE_INVAL)

    # add nic for instance communication
    if self.op.instance_communication:
      nic_name = "%s%s" % (constants.INSTANCE_COMMUNICATION_NIC_PREFIX,
                           self.op.instance_name)
      communication_network = constants.INSTANCE_COMMUNICATION_NETWORK

      self.op.nics.append({constants.INIC_NAME: nic_name,
                           constants.INIC_MAC: constants.VALUE_GENERATE,
                           constants.INIC_IP: constants.NIC_IP_POOL,
                           constants.INIC_NETWORK: communication_network})

    # check nics' parameter names
    for nic in self.op.nics:
      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
    # check that NIC's parameters names are unique and valid
    utils.ValidateDeviceNames("NIC", self.op.nics)

    self._CheckVLANArguments()

    self._CheckDiskArguments()
    assert self.op.disk_template is not None

    # instance name verification
    if self.op.name_check:
      self.hostname = _CheckHostnameSane(self, self.op.instance_name)
      self.op.instance_name = self.hostname.name
      # used in CheckPrereq for ip ping check
      self.check_ip = self.hostname.ip
    else:
      self.check_ip = None

    # file storage checks
    if (self.op.file_driver and
        not self.op.file_driver in constants.FILE_DRIVER):
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                 self.op.file_driver, errors.ECODE_INVAL)

    # set default file_driver if unset and required
    if (not self.op.file_driver and
        self.op.disk_template in constants.DTS_FILEBASED):
      self.op.file_driver = constants.FD_LOOP

    ### Node/iallocator related checks
    CheckIAllocatorOrNode(self, "iallocator", "pnode")

    if self.op.pnode is not None:
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        # mirrored templates need a secondary node
        if self.op.snode is None:
          raise errors.OpPrereqError("The networked disk templates need"
                                     " a mirror node", errors.ECODE_INVAL)
      elif self.op.snode:
        self.LogWarning("Secondary node will be ignored on non-mirrored disk"
                        " template")
        self.op.snode = None

    _CheckOpportunisticLocking(self.op)

    # mode-specific checks follow
    if self.op.mode == constants.INSTANCE_IMPORT:
      # On import force_variant must be True, because if we forced it at
      # initial install, our only chance when importing it back is that it
      # works again!
      self.op.force_variant = True

      if self.op.no_install:
        self.LogInfo("No-installation mode has no effect during import")

    elif self.op.mode == constants.INSTANCE_CREATE:
      if self.op.os_type is None:
        raise errors.OpPrereqError("No guest OS specified",
                                   errors.ECODE_INVAL)
      if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
        raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
                                   " installation" % self.op.os_type,
                                   errors.ECODE_STATE)
    elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
      self._cds = GetClusterDomainSecret()

      # Check handshake to ensure both clusters have the same domain secret
      src_handshake = self.op.source_handshake
      if not src_handshake:
        raise errors.OpPrereqError("Missing source handshake",
                                   errors.ECODE_INVAL)

      errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
                                                           src_handshake)
      if errmsg:
        raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
                                   errors.ECODE_INVAL)

      # Load and check source CA
      self.source_x509_ca_pem = self.op.source_x509_ca
      if not self.source_x509_ca_pem:
        raise errors.OpPrereqError("Missing source X509 CA",
                                   errors.ECODE_INVAL)

      try:
        (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
                                                    self._cds)
      # NOTE: Python 2-only "except X, err" syntax, kept as-is
      except OpenSSL.crypto.Error, err:
        raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
                                   (err, ), errors.ECODE_INVAL)

      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
      if errcode is not None:
        raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
                                   errors.ECODE_INVAL)

      self.source_x509_ca = cert

      src_instance_name = self.op.source_instance_name
      if not src_instance_name:
        raise errors.OpPrereqError("Missing source instance name",
                                   errors.ECODE_INVAL)

      self.source_instance_name = \
        netutils.GetHostname(name=src_instance_name).name

    else:
      raise errors.OpPrereqError("Invalid instance creation mode %r" %
                                 self.op.mode, errors.ECODE_INVAL)
554

    
555
  def ExpandNames(self):
    """ExpandNames for CreateInstance.

    Figure out the right locks for instance creation.

    """
    self.needed_locks = {}

    # this is just a preventive check, but someone might still add this
    # instance in the meantime, and creation will fail at lock-add time
    if self.op.instance_name in\
      [inst.name for inst in self.cfg.GetAllInstancesInfo().values()]:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 self.op.instance_name, errors.ECODE_EXISTS)

    self.add_locks[locking.LEVEL_INSTANCE] = self.op.instance_name

    if self.op.iallocator:
      # The iallocator may pick any node, so all node locks are needed.
      # TODO: Find a solution to not lock all nodes in the cluster, e.g. by
      # specifying a group on instance creation and then selecting nodes from
      # that group
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
      self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET

      if self.op.opportunistic_locking:
        self.opportunistic_locks[locking.LEVEL_NODE] = True
    else:
      # Explicit nodes: lock only the primary (and secondary, if given)
      (self.op.pnode_uuid, self.op.pnode) = \
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
      nodelist = [self.op.pnode_uuid]
      if self.op.snode is not None:
        (self.op.snode_uuid, self.op.snode) = \
          ExpandNodeUuidAndName(self.cfg, self.op.snode_uuid, self.op.snode)
        nodelist.append(self.op.snode_uuid)
      self.needed_locks[locking.LEVEL_NODE] = nodelist

    # in case of import lock the source node too
    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = self.op.src_node
      src_path = self.op.src_path

      if src_path is None:
        self.op.src_path = src_path = self.op.instance_name

      if src_node is None:
        # Source node unknown: the export must be searched on all nodes,
        # which only works for relative paths
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
        self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
        self.op.src_node = None
        if os.path.isabs(src_path):
          raise errors.OpPrereqError("Importing an instance from a path"
                                     " requires a source node option",
                                     errors.ECODE_INVAL)
      else:
        (self.op.src_node_uuid, self.op.src_node) = (_, src_node) = \
          ExpandNodeUuidAndName(self.cfg, self.op.src_node_uuid, src_node)
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
          self.needed_locks[locking.LEVEL_NODE].append(self.op.src_node_uuid)
        # relative paths are anchored below the cluster export directory
        if not os.path.isabs(src_path):
          self.op.src_path = \
            utils.PathJoin(pathutils.EXPORT_DIR, src_path)

    self.needed_locks[locking.LEVEL_NODE_RES] = \
      CopyLockList(self.needed_locks[locking.LEVEL_NODE])

    # Optimistically acquire shared group locks (we're reading the
    # configuration).  We can't just call GetInstanceNodeGroups, because the
    # instance doesn't exist yet. Therefore we lock all node groups of all
    # nodes we have.
    if self.needed_locks[locking.LEVEL_NODE] == locking.ALL_SET:
      # In the case we lock all nodes for opportunistic allocation, we have no
      # choice than to lock all groups, because they're allocated before nodes.
      # This is sad, but true. At least we release all those we don't need in
      # CheckPrereq later.
      self.needed_locks[locking.LEVEL_NODEGROUP] = locking.ALL_SET
    else:
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
        list(self.cfg.GetNodeGroupsFromNodes(
          self.needed_locks[locking.LEVEL_NODE]))
    self.share_locks[locking.LEVEL_NODEGROUP] = 1
634

    
635
  def DeclareLocks(self, level):
    """Declare locks for a specific locking level.

    """
    if level != locking.LEVEL_NODE_RES:
      return
    if not self.opportunistic_locks[locking.LEVEL_NODE]:
      return
    # Even when using opportunistic locking, we require the same set of
    # NODE_RES locks as we got NODE locks
    self.needed_locks[locking.LEVEL_NODE_RES] = \
      self.owned_locks(locking.LEVEL_NODE)
642

    
643
  def _RunAllocator(self):
    """Run the allocator based on input opcode.

    On success the selected primary (and, for two-node requests,
    secondary) node is stored back into the opcode.

    """
    node_name_whitelist = None
    if self.op.opportunistic_locking:
      # Only consider nodes for which a lock is held
      node_name_whitelist = self.cfg.GetNodeNames(
        self.owned_locks(locking.LEVEL_NODE))

    request = _CreateInstanceAllocRequest(self.op, self.disks,
                                          self.nics, self.be_full,
                                          node_name_whitelist)
    allocator = iallocator.IAllocator(self.cfg, self.rpc, request)

    allocator.Run(self.op.iallocator)

    if not allocator.success:
      # When opportunistic locks are used only a temporary failure is generated
      ecode = (errors.ECODE_TEMP_NORES if self.op.opportunistic_locking
               else errors.ECODE_NORES)
      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" %
                                 (self.op.iallocator, allocator.info),
                                 ecode)

    (self.op.pnode_uuid, self.op.pnode) = \
      ExpandNodeUuidAndName(self.cfg, None, allocator.result[0])
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                 self.op.instance_name, self.op.iallocator,
                 utils.CommaJoin(allocator.result))

    assert request.RequiredNodes() in (1, 2), "Wrong node count from iallocator"

    if request.RequiredNodes() == 2:
      (self.op.snode_uuid, self.op.snode) = \
        ExpandNodeUuidAndName(self.cfg, None, allocator.result[1])
684

    
685
  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {"ADD_MODE": self.op.mode}
    if self.op.mode == constants.INSTANCE_IMPORT:
      # import-specific variables describing the export source
      env.update({
        "SRC_NODE": self.op.src_node,
        "SRC_PATH": self.op.src_path,
        "SRC_IMAGES": self.src_images,
        })

    disk_info = [(d[constants.IDISK_NAME], d.get("uuid", ""),
                  d[constants.IDISK_SIZE], d[constants.IDISK_MODE])
                 for d in self.disks]

    env.update(BuildInstanceHookEnv(
      name=self.op.instance_name,
      primary_node_name=self.op.pnode,
      secondary_node_names=self.cfg.GetNodeNames(self.secondaries),
      status=self.op.start,
      os_type=self.op.os_type,
      minmem=self.be_full[constants.BE_MINMEM],
      maxmem=self.be_full[constants.BE_MAXMEM],
      vcpus=self.be_full[constants.BE_VCPUS],
      nics=NICListToTuple(self, self.nics),
      disk_template=self.op.disk_template,
      disks=disk_info,
      bep=self.be_full,
      hvp=self.hv_full,
      hypervisor_name=self.op.hypervisor,
      tags=self.op.tags,
      ))

    return env
720

    
721
  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    # hooks run on the master, the primary and all secondary nodes
    node_list = [self.cfg.GetMasterNode(), self.op.pnode_uuid]
    node_list.extend(self.secondaries)
    return (node_list, node_list)
727

    
728
  def _ReadExportInfo(self):
    """Reads the export information from disk.

    It will override the opcode source node and path with the actual
    information, if these two were not specified before.

    @return: the export information

    """
    assert self.op.mode == constants.INSTANCE_IMPORT

    if self.op.src_node_uuid is None:
      # No source node given: search all locked nodes for an export
      # matching the (relative) source path
      locked_nodes = self.owned_locks(locking.LEVEL_NODE)
      exp_list = self.rpc.call_export_list(locked_nodes)
      found = False
      for node_uuid in exp_list:
        # skip nodes whose RPC failed
        if exp_list[node_uuid].fail_msg:
          continue
        if self.op.src_path in exp_list[node_uuid].payload:
          # record the first node carrying the export and make the path
          # absolute below the cluster export directory
          found = True
          self.op.src_node = self.cfg.GetNodeInfo(node_uuid).name
          self.op.src_node_uuid = node_uuid
          self.op.src_path = utils.PathJoin(pathutils.EXPORT_DIR,
                                            self.op.src_path)
          break
      if not found:
        raise errors.OpPrereqError("No export found for relative path %s" %
                                   self.op.src_path, errors.ECODE_INVAL)

    CheckNodeOnline(self, self.op.src_node_uuid)
    result = self.rpc.call_export_info(self.op.src_node_uuid, self.op.src_path)
    result.Raise("No export or invalid export found in dir %s" %
                 self.op.src_path)

    export_info = objects.SerializableConfigParser.Loads(str(result.payload))
    if not export_info.has_section(constants.INISECT_EXP):
      raise errors.ProgrammerError("Corrupted export config",
                                   errors.ECODE_ENVIRON)

    # refuse exports written by an incompatible Ganeti version
    ei_version = export_info.get(constants.INISECT_EXP, "version")
    if int(ei_version) != constants.EXPORT_VERSION:
      raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                 (ei_version, constants.EXPORT_VERSION),
                                 errors.ECODE_ENVIRON)
    return export_info
773

    
774
  def _ReadExportParams(self, einfo):
    """Use export parameters as defaults.

    In case the opcode doesn't specify (as in override) some instance
    parameters, then try to use them from the export information, if
    that declares them.

    @param einfo: the export information (config-parser style object,
        as returned by L{_ReadExportInfo})

    """
    self.op.os_type = einfo.get(constants.INISECT_EXP, "os")

    if not self.op.disks:
      disks = []
      # TODO: import the disk iv_name too
      for idx in range(constants.MAX_DISKS):
        if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
          disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
          disks.append({constants.IDISK_SIZE: disk_sz})
      self.op.disks = disks
      if not disks and self.op.disk_template != constants.DT_DISKLESS:
        raise errors.OpPrereqError("No disk info specified and the export"
                                   " is missing the disk information",
                                   errors.ECODE_INVAL)

    if not self.op.nics:
      nics = []
      for idx in range(constants.MAX_NICS):
        if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
          ndict = {}
          for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
            nic_param_name = "nic%d_%s" % (idx, name)
            if einfo.has_option(constants.INISECT_INS, nic_param_name):
              v = einfo.get(constants.INISECT_INS, nic_param_name)
              ndict[name] = v
          nics.append(ndict)
        else:
          # NICs are numbered consecutively; the first missing index
          # ends the scan
          break
      self.op.nics = nics

    if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
      self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()

    if (self.op.hypervisor is None and
        einfo.has_option(constants.INISECT_INS, "hypervisor")):
      self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")

    if einfo.has_section(constants.INISECT_HYP):
      # use the export parameters but do not override the ones
      # specified by the user
      for name, value in einfo.items(constants.INISECT_HYP):
        if name not in self.op.hvparams:
          self.op.hvparams[name] = value

    if einfo.has_section(constants.INISECT_BEP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_BEP):
        if name not in self.op.beparams:
          self.op.beparams[name] = value
        # Compatibility for the old "memory" be param
        if name == constants.BE_MEMORY:
          if constants.BE_MAXMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MAXMEM] = value
          if constants.BE_MINMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MINMEM] = value
    else:
      # try to read the parameters old style, from the main section
      for name in constants.BES_PARAMETERS:
        if (name not in self.op.beparams and
            einfo.has_option(constants.INISECT_INS, name)):
          self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)

    if einfo.has_section(constants.INISECT_OSP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_OSP):
        if name not in self.op.osparams:
          self.op.osparams[name] = value
  def _RevertToDefaults(self, cluster):
    """Revert the instance parameters to the default values.

    Drops from the opcode's hv/be/nic/os parameter dicts every entry
    whose value equals the cluster-filled default, so that only real
    overrides remain stored on the instance.

    @param cluster: the cluster configuration object used to compute
        the default (SimpleFill*) parameter values

    """
    # hvparams
    hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
    # .keys() snapshots the keys (Python 2 list), so deleting while
    # iterating is safe
    for name in self.op.hvparams.keys():
      if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
        del self.op.hvparams[name]
    # beparams
    be_defs = cluster.SimpleFillBE({})
    for name in self.op.beparams.keys():
      if name in be_defs and be_defs[name] == self.op.beparams[name]:
        del self.op.beparams[name]
    # nic params
    nic_defs = cluster.SimpleFillNIC({})
    for nic in self.op.nics:
      for name in constants.NICS_PARAMETERS:
        if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
          del nic[name]
    # osparams
    os_defs = cluster.SimpleFillOS(self.op.os_type, {})
    for name in self.op.osparams.keys():
      if name in os_defs and os_defs[name] == self.op.osparams[name]:
        del self.op.osparams[name]
  def _CalculateFileStorageDir(self):
    """Calculate final instance file storage dir.

    Sets C{self.instance_file_storage_dir}: C{None} for non-file-based
    disk templates, otherwise the cluster storage root joined with the
    optional opcode C{file_storage_dir} and (except for Gluster) the
    instance name.

    @raise errors.OpPrereqError: if the cluster has no storage dir
        configured for the requested file-based disk template

    """
    # file storage dir calculation/check
    self.instance_file_storage_dir = None
    if self.op.disk_template in constants.DTS_FILEBASED:
      # build the full file storage dir path
      joinargs = []

      cfg_storage = None
      if self.op.disk_template == constants.DT_FILE:
        cfg_storage = self.cfg.GetFileStorageDir()
      elif self.op.disk_template == constants.DT_SHARED_FILE:
        cfg_storage = self.cfg.GetSharedFileStorageDir()
      elif self.op.disk_template == constants.DT_GLUSTER:
        cfg_storage = self.cfg.GetGlusterStorageDir()

      if not cfg_storage:
        raise errors.OpPrereqError(
          "Cluster file storage dir for {tpl} storage type not defined".format(
            tpl=repr(self.op.disk_template)
          ),
          errors.ECODE_STATE
      )

      joinargs.append(cfg_storage)

      if self.op.file_storage_dir is not None:
        joinargs.append(self.op.file_storage_dir)

      # Gluster storage paths do not include the instance name
      if self.op.disk_template != constants.DT_GLUSTER:
        joinargs.append(self.op.instance_name)

      if len(joinargs) > 1:
        # pylint: disable=W0142
        self.instance_file_storage_dir = utils.PathJoin(*joinargs)
      else:
        self.instance_file_storage_dir = joinargs[0]
  def CheckPrereq(self): # pylint: disable=R0914
    """Check prerequisites.

    Validates node groups/locks, storage, hypervisor, tags, parameters,
    NICs and disks (including adoption data), instance policy and free
    memory before the instance is created.

    @raise errors.OpPrereqError: on any failed prerequisite

    """
    # Check that the optimistically acquired groups are correct wrt the
    # acquired nodes
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
    cur_groups = list(self.cfg.GetNodeGroupsFromNodes(owned_nodes))
    if not owned_groups.issuperset(cur_groups):
      raise errors.OpPrereqError("New instance %s's node groups changed since"
                                 " locks were acquired, current groups"
                                 " are '%s', owning groups '%s'; retry the"
                                 " operation" %
                                 (self.op.instance_name,
                                  utils.CommaJoin(cur_groups),
                                  utils.CommaJoin(owned_groups)),
                                 errors.ECODE_STATE)

    self._CalculateFileStorageDir()

    if self.op.mode == constants.INSTANCE_IMPORT:
      export_info = self._ReadExportInfo()
      self._ReadExportParams(export_info)
      self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
    else:
      self._old_instance_name = None

    if (not self.cfg.GetVGName() and
        self.op.disk_template not in constants.DTS_NOT_LVM):
      raise errors.OpPrereqError("Cluster does not support lvm-based"
                                 " instances", errors.ECODE_STATE)

    if (self.op.hypervisor is None or
        self.op.hypervisor == constants.VALUE_AUTO):
      self.op.hypervisor = self.cfg.GetHypervisorType()

    cluster = self.cfg.GetClusterInfo()
    enabled_hvs = cluster.enabled_hypervisors
    if self.op.hypervisor not in enabled_hvs:
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
                                 " cluster (%s)" %
                                 (self.op.hypervisor, ",".join(enabled_hvs)),
                                 errors.ECODE_STATE)

    # Check tag validity
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)

    # check hypervisor parameter syntax (locally)
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
    filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
                                      self.op.hvparams)
    hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
    hv_type.CheckParameterSyntax(filled_hvp)
    self.hv_full = filled_hvp
    # check that we don't specify global parameters on an instance
    CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
                         "instance", "cluster")

    # fill and remember the beparams dict
    self.be_full = _ComputeFullBeParams(self.op, cluster)

    # build os parameters
    self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)

    # now that hvp/bep are in final format, let's reset to defaults,
    # if told to do so
    if self.op.identify_defaults:
      self._RevertToDefaults(cluster)

    # NIC buildup
    self.nics = _ComputeNics(self.op, cluster, self.check_ip, self.cfg,
                             self.proc.GetECId())

    # disk checks/pre-build
    default_vg = self.cfg.GetVGName()
    self.disks = ComputeDisks(self.op, default_vg)

    if self.op.mode == constants.INSTANCE_IMPORT:
      disk_images = []
      for idx in range(len(self.disks)):
        option = "disk%d_dump" % idx
        if export_info.has_option(constants.INISECT_INS, option):
          # FIXME: are the old os-es, disk sizes, etc. useful?
          export_name = export_info.get(constants.INISECT_INS, option)
          image = utils.PathJoin(self.op.src_path, export_name)
          disk_images.append(image)
        else:
          disk_images.append(False)

      self.src_images = disk_images

      # when re-importing under the same name, reuse the exported MACs
      # for NICs left on 'auto'
      if self.op.instance_name == self._old_instance_name:
        for idx, nic in enumerate(self.nics):
          if nic.mac == constants.VALUE_AUTO:
            nic_mac_ini = "nic%d_mac" % idx
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)

    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT

    # ip ping checks (we use the same ip that was resolved in ExpandNames)
    if self.op.ip_check:
      if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (self.check_ip, self.op.instance_name),
                                   errors.ECODE_NOTUNIQUE)

    #### mac address generation
    # By generating here the mac address both the allocator and the hooks get
    # the real final mac address rather than the 'auto' or 'generate' value.
    # There is a race condition between the generation and the instance object
    # creation, which means that we know the mac is valid now, but we're not
    # sure it will be when we actually add the instance. If things go bad
    # adding the instance will abort because of a duplicate mac, and the
    # creation job will fail.
    for nic in self.nics:
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId())

    #### allocator run

    if self.op.iallocator is not None:
      self._RunAllocator()

    # Release all unneeded node locks
    keep_locks = filter(None, [self.op.pnode_uuid, self.op.snode_uuid,
                               self.op.src_node_uuid])
    ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)
    # Release all unneeded group locks
    ReleaseLocks(self, locking.LEVEL_NODEGROUP,
                 keep=self.cfg.GetNodeGroupsFromNodes(keep_locks))

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES)), \
      "Node locks differ from node resource locks"

    #### node related checks

    # check primary node
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode_uuid)
    assert self.pnode is not None, \
      "Cannot retrieve locked node %s" % self.op.pnode_uuid
    if pnode.offline:
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if pnode.drained:
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if not pnode.vm_capable:
      raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
                                 " '%s'" % pnode.name, errors.ECODE_STATE)

    self.secondaries = []

    # Fill in any IPs from IP pools. This must happen here, because we need to
    # know the nic's primary node, as specified by the iallocator
    for idx, nic in enumerate(self.nics):
      net_uuid = nic.network
      if net_uuid is not None:
        nobj = self.cfg.GetNetwork(net_uuid)
        netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.uuid)
        if netparams is None:
          raise errors.OpPrereqError("No netparams found for network"
                                     " %s. Probably not connected to"
                                     " node's %s nodegroup" %
                                     (nobj.name, self.pnode.name),
                                     errors.ECODE_INVAL)
        self.LogInfo("NIC/%d inherits netparams %s" %
                     (idx, netparams.values()))
        nic.nicparams = dict(netparams)
        if nic.ip is not None:
          if nic.ip.lower() == constants.NIC_IP_POOL:
            try:
              nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
                                         " from the address pool" % idx,
                                         errors.ECODE_STATE)
            self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name)
          else:
            try:
              self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId(),
                                 check=self.op.conflicts_check)
            except errors.ReservationError:
              raise errors.OpPrereqError("IP address %s already in use"
                                         " or does not belong to network %s" %
                                         (nic.ip, nobj.name),
                                         errors.ECODE_NOTUNIQUE)

      # net is None, ip None or given
      elif self.op.conflicts_check:
        _CheckForConflictingIp(self, nic.ip, self.pnode.uuid)

    # mirror node verification
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.snode_uuid == pnode.uuid:
        raise errors.OpPrereqError("The secondary node cannot be the"
                                   " primary node", errors.ECODE_INVAL)
      CheckNodeOnline(self, self.op.snode_uuid)
      CheckNodeNotDrained(self, self.op.snode_uuid)
      CheckNodeVmCapable(self, self.op.snode_uuid)
      self.secondaries.append(self.op.snode_uuid)

      snode = self.cfg.GetNodeInfo(self.op.snode_uuid)
      if pnode.group != snode.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    nodes = [pnode]
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      nodes.append(snode)
    has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
    excl_stor = compat.any(map(has_es, nodes))
    if excl_stor and not self.op.disk_template in constants.DTS_EXCL_STORAGE:
      raise errors.OpPrereqError("Disk template %s not supported with"
                                 " exclusive storage" % self.op.disk_template,
                                 errors.ECODE_STATE)
    for disk in self.disks:
      CheckSpindlesExclusiveStorage(disk, excl_stor, True)

    node_uuids = [pnode.uuid] + self.secondaries

    if not self.adopt_disks:
      if self.op.disk_template == constants.DT_RBD:
        # _CheckRADOSFreeSpace() is just a placeholder.
        # Any function that checks prerequisites can be placed here.
        # Check if there is enough space on the RADOS cluster.
        CheckRADOSFreeSpace()
      elif self.op.disk_template == constants.DT_EXT:
        # FIXME: Function that checks prereqs if needed
        pass
      elif self.op.disk_template in constants.DTS_LVM:
        # Check lv size requirements, if not adopting
        req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
        CheckNodesFreeDiskPerVG(self, node_uuids, req_sizes)
      else:
        # FIXME: add checks for other, non-adopting, non-lvm disk templates
        pass

    elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
      all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
                                disk[constants.IDISK_ADOPT])
                     for disk in self.disks])
      if len(all_lvs) != len(self.disks):
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
                                   errors.ECODE_INVAL)
      for lv_name in all_lvs:
        try:
          # FIXME: lv_name here is "vg/lv" need to ensure that other calls
          # to ReserveLV uses the same syntax
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("LV named %s used by another instance" %
                                     lv_name, errors.ECODE_NOTUNIQUE)

      vg_names = self.rpc.call_vg_list([pnode.uuid])[pnode.uuid]
      vg_names.Raise("Cannot get VG information from node %s" % pnode.name)

      node_lvs = self.rpc.call_lv_list([pnode.uuid],
                                       vg_names.payload.keys())[pnode.uuid]
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
      node_lvs = node_lvs.payload

      delta = all_lvs.difference(node_lvs.keys())
      if delta:
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
      if online_lvs:
        raise errors.OpPrereqError("Online logical volumes found, cannot"
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
                                   errors.ECODE_STATE)
      # update the size of disk based on what is found
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
                                        dsk[constants.IDISK_ADOPT])][0]))

    elif self.op.disk_template == constants.DT_BLOCK:
      # Normalize and de-duplicate device paths
      all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
                       for disk in self.disks])
      if len(all_disks) != len(self.disks):
        raise errors.OpPrereqError("Duplicate disk names given for adoption",
                                   errors.ECODE_INVAL)
      baddisks = [d for d in all_disks
                  if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
      if baddisks:
        raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
                                   " cannot be adopted" %
                                   (utils.CommaJoin(baddisks),
                                    constants.ADOPTABLE_BLOCKDEV_ROOT),
                                   errors.ECODE_INVAL)

      node_disks = self.rpc.call_bdev_sizes([pnode.uuid],
                                            list(all_disks))[pnode.uuid]
      node_disks.Raise("Cannot get block device information from node %s" %
                       pnode.name)
      node_disks = node_disks.payload
      delta = all_disks.difference(node_disks.keys())
      if delta:
        raise errors.OpPrereqError("Missing block device(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_disks[dsk[constants.IDISK_ADOPT]]))

    # Check disk access param to be compatible with specified hypervisor
    node_info = self.cfg.GetNodeInfo(self.op.pnode_uuid)
    node_group = self.cfg.GetNodeGroup(node_info.group)
    disk_params = self.cfg.GetGroupDiskParams(node_group)
    access_type = disk_params[self.op.disk_template].get(
      constants.RBD_ACCESS, constants.DISK_KERNELSPACE
    )

    if not IsValidDiskAccessModeCombination(self.op.hypervisor,
                                            self.op.disk_template,
                                            access_type):
      raise errors.OpPrereqError("Selected hypervisor (%s) cannot be"
                                 " used with %s disk access param" %
                                 (self.op.hypervisor, access_type),
                                  errors.ECODE_STATE)

    # Verify instance specs
    spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
    ispec = {
      constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
      constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
      constants.ISPEC_DISK_COUNT: len(self.disks),
      constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE]
                                  for disk in self.disks],
      constants.ISPEC_NIC_COUNT: len(self.nics),
      constants.ISPEC_SPINDLE_USE: spindle_use,
      }

    group_info = self.cfg.GetNodeGroup(pnode.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
    res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
                                               self.op.disk_template)
    if not self.op.ignore_ipolicy and res:
      msg = ("Instance allocation to group %s (%s) violates policy: %s" %
             (pnode.group, group_info.name, utils.CommaJoin(res)))
      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

    CheckHVParams(self, node_uuids, self.op.hypervisor, self.op.hvparams)

    CheckNodeHasOS(self, pnode.uuid, self.op.os_type, self.op.force_variant)
    # check OS parameters (remotely)
    CheckOSParams(self, True, node_uuids, self.op.os_type, self.os_full)

    CheckNicsBridgesExist(self, self.nics, self.pnode.uuid)

    #TODO: _CheckExtParams (remotely)
    # Check parameters for extstorage

    # memory check on primary node
    #TODO(dynmem): use MINMEM for checking
    if self.op.start:
      hvfull = objects.FillDict(cluster.hvparams.get(self.op.hypervisor, {}),
                                self.op.hvparams)
      CheckNodeFreeMemory(self, self.pnode.uuid,
                          "creating instance %s" % self.op.instance_name,
                          self.be_full[constants.BE_MAXMEM],
                          self.op.hypervisor, hvfull)

    self.dry_run_result = list(node_uuids)
  def Exec(self, feedback_fn):
1291
    """Create and add the instance to the cluster.
1292

1293
    """
1294
    assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
1295
                self.owned_locks(locking.LEVEL_NODE)), \
1296
      "Node locks differ from node resource locks"
1297
    assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)
1298

    
1299
    ht_kind = self.op.hypervisor
1300
    if ht_kind in constants.HTS_REQ_PORT:
1301
      network_port = self.cfg.AllocatePort()
1302
    else:
1303
      network_port = None
1304

    
1305
    instance_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
1306

    
1307
    # This is ugly but we got a chicken-egg problem here
1308
    # We can only take the group disk parameters, as the instance
1309
    # has no disks yet (we are generating them right here).
1310
    nodegroup = self.cfg.GetNodeGroup(self.pnode.group)
1311
    disks = GenerateDiskTemplate(self,
1312
                                 self.op.disk_template,
1313
                                 instance_uuid, self.pnode.uuid,
1314
                                 self.secondaries,
1315
                                 self.disks,
1316
                                 self.instance_file_storage_dir,
1317
                                 self.op.file_driver,
1318
                                 0,
1319
                                 feedback_fn,
1320
                                 self.cfg.GetGroupDiskParams(nodegroup))
1321

    
1322
    iobj = objects.Instance(name=self.op.instance_name,
1323
                            uuid=instance_uuid,
1324
                            os=self.op.os_type,
1325
                            primary_node=self.pnode.uuid,
1326
                            nics=self.nics, disks=disks,
1327
                            disk_template=self.op.disk_template,
1328
                            disks_active=False,
1329
                            admin_state=constants.ADMINST_DOWN,
1330
                            network_port=network_port,
1331
                            beparams=self.op.beparams,
1332
                            hvparams=self.op.hvparams,
1333
                            hypervisor=self.op.hypervisor,
1334
                            osparams=self.op.osparams,
1335
                            )
1336

    
1337
    if self.op.tags:
1338
      for tag in self.op.tags:
1339
        iobj.AddTag(tag)
1340

    
1341
    if self.adopt_disks:
1342
      if self.op.disk_template == constants.DT_PLAIN:
1343
        # rename LVs to the newly-generated names; we need to construct
1344
        # 'fake' LV disks with the old data, plus the new unique_id
1345
        tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
1346
        rename_to = []
1347
        for t_dsk, a_dsk in zip(tmp_disks, self.disks):
1348
          rename_to.append(t_dsk.logical_id)
1349
          t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
1350
        result = self.rpc.call_blockdev_rename(self.pnode.uuid,
1351
                                               zip(tmp_disks, rename_to))
1352
        result.Raise("Failed to rename adoped LVs")
1353
    else:
1354
      feedback_fn("* creating instance disks...")
1355
      try:
1356
        CreateDisks(self, iobj)
1357
      except errors.OpExecError:
1358
        self.LogWarning("Device creation failed")
1359
        self.cfg.ReleaseDRBDMinors(self.op.instance_name)
1360
        raise
1361

    
1362
    feedback_fn("adding instance %s to cluster config" % self.op.instance_name)
1363

    
1364
    self.cfg.AddInstance(iobj, self.proc.GetECId())
1365

    
1366
    # Declare that we don't want to remove the instance lock anymore, as we've
1367
    # added the instance to the config
1368
    del self.remove_locks[locking.LEVEL_INSTANCE]
1369

    
1370
    if self.op.mode == constants.INSTANCE_IMPORT:
1371
      # Release unused nodes
1372
      ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node_uuid])
1373
    else:
1374
      # Release all nodes
1375
      ReleaseLocks(self, locking.LEVEL_NODE)
1376

    
1377
    disk_abort = False
1378
    if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
1379
      feedback_fn("* wiping instance disks...")
1380
      try:
1381
        WipeDisks(self, iobj)
1382
      except errors.OpExecError, err:
1383
        logging.exception("Wiping disks failed")
1384
        self.LogWarning("Wiping instance disks failed (%s)", err)
1385
        disk_abort = True
1386

    
1387
    if disk_abort:
1388
      # Something is already wrong with the disks, don't do anything else
1389
      pass
1390
    elif self.op.wait_for_sync:
1391
      disk_abort = not WaitForSync(self, iobj)
1392
    elif iobj.disk_template in constants.DTS_INT_MIRROR:
1393
      # make sure the disks are not degraded (still sync-ing is ok)
1394
      feedback_fn("* checking mirrors status")
1395
      disk_abort = not WaitForSync(self, iobj, oneshot=True)
1396
    else:
1397
      disk_abort = False
1398

    
1399
    if disk_abort:
1400
      RemoveDisks(self, iobj)
1401
      self.cfg.RemoveInstance(iobj.uuid)
1402
      # Make sure the instance lock gets removed
1403
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
1404
      raise errors.OpExecError("There are some degraded disks for"
1405
                               " this instance")
1406

    
1407
    # instance disks are now active
1408
    iobj.disks_active = True
1409

    
1410
    # Release all node resource locks
1411
    ReleaseLocks(self, locking.LEVEL_NODE_RES)
1412

    
1413
    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
1414
      if self.op.mode == constants.INSTANCE_CREATE:
1415
        if not self.op.no_install:
1416
          pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
1417
                        not self.op.wait_for_sync)
1418
          if pause_sync:
1419
            feedback_fn("* pausing disk sync to install instance OS")
1420
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
1421
                                                              (iobj.disks,
1422
                                                               iobj), True)
1423
            for idx, success in enumerate(result.payload):
1424
              if not success:
1425
                logging.warn("pause-sync of instance %s for disk %d failed",
1426
                             self.op.instance_name, idx)
1427

    
1428
          feedback_fn("* running the instance OS create scripts...")
1429
          # FIXME: pass debug option from opcode to backend
1430
          os_add_result = \
1431
            self.rpc.call_instance_os_add(self.pnode.uuid, (iobj, None), False,
1432
                                          self.op.debug_level)
1433
          if pause_sync:
1434
            feedback_fn("* resuming disk sync")
1435
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
1436
                                                              (iobj.disks,
1437
                                                               iobj), False)
1438
            for idx, success in enumerate(result.payload):
1439
              if not success:
1440
                logging.warn("resume-sync of instance %s for disk %d failed",
1441
                             self.op.instance_name, idx)
1442

    
1443
          os_add_result.Raise("Could not add os for instance %s"
1444
                              " on node %s" % (self.op.instance_name,
1445
                                               self.pnode.name))
1446

    
1447
      else:
1448
        if self.op.mode == constants.INSTANCE_IMPORT:
1449
          feedback_fn("* running the instance OS import scripts...")
1450

    
1451
          transfers = []
1452

    
1453
          for idx, image in enumerate(self.src_images):
1454
            if not image:
1455
              continue
1456

    
1457
            # FIXME: pass debug option from opcode to backend
1458
            dt = masterd.instance.DiskTransfer("disk/%s" % idx,
1459
                                               constants.IEIO_FILE, (image, ),
1460
                                               constants.IEIO_SCRIPT,
1461
                                               ((iobj.disks[idx], iobj), idx),
1462
                                               None)
1463
            transfers.append(dt)
1464

    
1465
          import_result = \
1466
            masterd.instance.TransferInstanceData(self, feedback_fn,
1467
                                                  self.op.src_node_uuid,
1468
                                                  self.pnode.uuid,
1469
                                                  self.pnode.secondary_ip,
1470
                                                  self.op.compress,
1471
                                                  iobj, transfers)
1472
          if not compat.all(import_result):
1473
            self.LogWarning("Some disks for instance %s on node %s were not"
1474
                            " imported successfully" % (self.op.instance_name,
1475
                                                        self.pnode.name))
1476

    
1477
          rename_from = self._old_instance_name
1478

    
1479
        elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
1480
          feedback_fn("* preparing remote import...")
1481
          # The source cluster will stop the instance before attempting to make
1482
          # a connection. In some cases stopping an instance can take a long
1483
          # time, hence the shutdown timeout is added to the connection
1484
          # timeout.
1485
          connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
1486
                             self.op.source_shutdown_timeout)
1487
          timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
1488

    
1489
          assert iobj.primary_node == self.pnode.uuid
1490
          disk_results = \
1491
            masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
1492
                                          self.source_x509_ca,
1493
                                          self._cds, self.op.compress, timeouts)
1494
          if not compat.all(disk_results):
1495
            # TODO: Should the instance still be started, even if some disks
1496
            # failed to import (valid for local imports, too)?
1497
            self.LogWarning("Some disks for instance %s on node %s were not"
1498
                            " imported successfully" % (self.op.instance_name,
1499
                                                        self.pnode.name))
1500

    
1501
          rename_from = self.source_instance_name
1502

    
1503
        else:
1504
          # also checked in the prereq part
1505
          raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
1506
                                       % self.op.mode)
1507

    
1508
        # Run rename script on newly imported instance
1509
        assert iobj.name == self.op.instance_name
1510
        feedback_fn("Running rename script for %s" % self.op.instance_name)
1511
        result = self.rpc.call_instance_run_rename(self.pnode.uuid, iobj,
1512
                                                   rename_from,
1513
                                                   self.op.debug_level)
1514
        result.Warn("Failed to run rename script for %s on node %s" %
1515
                    (self.op.instance_name, self.pnode.name), self.LogWarning)
1516

    
1517
    assert not self.owned_locks(locking.LEVEL_NODE_RES)
1518

    
1519
    if self.op.start:
1520
      iobj.admin_state = constants.ADMINST_UP
1521
      self.cfg.Update(iobj, feedback_fn)
1522
      logging.info("Starting instance %s on node %s", self.op.instance_name,
1523
                   self.pnode.name)
1524
      feedback_fn("* starting instance...")
1525
      result = self.rpc.call_instance_start(self.pnode.uuid, (iobj, None, None),
1526
                                            False, self.op.reason)
1527
      result.Raise("Could not start instance")
1528

    
1529
    return list(iobj.all_nodes)
1530

    
1531

    
1532
class LUInstanceRename(LogicalUnit):
  """Rename an instance.

  Runs under the big Ganeti lock (no REQ_BGL = False here), which makes
  the instance-lock swap in L{Exec} safe.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE

  def CheckArguments(self):
    """Check arguments.

    An IP check is only meaningful after the new name has been resolved,
    hence it requires the name check.

    """
    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("IP address check requires a name check",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = BuildInstanceHookEnvByObject(self, self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    Master plus all of the instance's nodes, for both pre and post hooks.

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    (self.op.instance_uuid, self.op.instance_name) = \
      ExpandInstanceUuidAndName(self.cfg, self.op.instance_uuid,
                                self.op.instance_name)
    instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    assert instance is not None

    # It should actually not happen that an instance is running with a disabled
    # disk template, but in case it does, the renaming of file-based instances
    # will fail horribly. Thus, we test it before.
    if (instance.disk_template in constants.DTS_FILEBASED and
        self.op.new_name != instance.name):
      CheckDiskTemplateEnabled(self.cfg.GetClusterInfo(),
                               instance.disk_template)

    CheckNodeOnline(self, instance.primary_node)
    CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
                       msg="cannot rename")
    self.instance = instance

    new_name = self.op.new_name
    if self.op.name_check:
      # Resolve the new name and, if requested, make sure its IP is free
      hostname = _CheckHostnameSane(self, new_name)
      new_name = self.op.new_name = hostname.name
      if (self.op.ip_check and
          netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (hostname.ip, new_name),
                                   errors.ECODE_NOTUNIQUE)

    # Reject a name that is already taken by another instance (renaming to
    # the instance's current name is allowed, e.g. after a name re-resolve)
    instance_names = [inst.name for
                      inst in self.cfg.GetAllInstancesInfo().values()]
    if new_name in instance_names and new_name != instance.name:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 new_name, errors.ECODE_EXISTS)

  def Exec(self, feedback_fn):
    """Rename the instance.

    Renames in the configuration first, then renames the file storage
    directory (if applicable), updates disk metadata and finally runs the
    OS rename script with the disks temporarily activated.

    """
    old_name = self.instance.name

    # Remember the old file storage directory before the config rename,
    # as the disk logical_id will change with it
    rename_file_storage = False
    if (self.instance.disk_template in (constants.DT_FILE,
                                        constants.DT_SHARED_FILE) and
        self.op.new_name != self.instance.name):
      old_file_storage_dir = os.path.dirname(
                               self.instance.disks[0].logical_id[1])
      rename_file_storage = True

    self.cfg.RenameInstance(self.instance.uuid, self.op.new_name)
    # Change the instance lock. This is definitely safe while we hold the BGL.
    # Otherwise the new lock would have to be added in acquired mode.
    assert self.REQ_BGL
    assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER)
    self.glm.remove(locking.LEVEL_INSTANCE, old_name)
    self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)

    # re-read the instance from the configuration after rename
    renamed_inst = self.cfg.GetInstanceInfo(self.instance.uuid)

    if rename_file_storage:
      new_file_storage_dir = os.path.dirname(
                               renamed_inst.disks[0].logical_id[1])
      result = self.rpc.call_file_storage_dir_rename(renamed_inst.primary_node,
                                                     old_file_storage_dir,
                                                     new_file_storage_dir)
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
                   " (but the instance has been renamed in Ganeti)" %
                   (self.cfg.GetNodeName(renamed_inst.primary_node),
                    old_file_storage_dir, new_file_storage_dir))

    StartInstanceDisks(self, renamed_inst, None)
    # update info on disks
    info = GetInstanceInfoText(renamed_inst)
    for (idx, disk) in enumerate(renamed_inst.disks):
      for node_uuid in renamed_inst.all_nodes:
        # Failures here are non-fatal: only warn, the config rename is done
        result = self.rpc.call_blockdev_setinfo(node_uuid,
                                                (disk, renamed_inst), info)
        result.Warn("Error setting info on node %s for disk %s" %
                    (self.cfg.GetNodeName(node_uuid), idx), self.LogWarning)
    try:
      result = self.rpc.call_instance_run_rename(renamed_inst.primary_node,
                                                 renamed_inst, old_name,
                                                 self.op.debug_level)
      # A failing OS rename script is only a warning; the Ganeti-side
      # rename has already happened and is not rolled back
      result.Warn("Could not run OS rename script for instance %s on node %s"
                  " (but the instance has been renamed in Ganeti)" %
                  (renamed_inst.name,
                   self.cfg.GetNodeName(renamed_inst.primary_node)),
                  self.LogWarning)
    finally:
      # Always deactivate the disks we activated above
      ShutdownInstanceDisks(self, renamed_inst)

    return renamed_inst.name
class LUInstanceRemove(LogicalUnit):
  """Remove an instance.

  Shuts the instance down on its primary node and then removes it (and
  its disks) from the cluster configuration.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    # Lock the instance; node locks are computed later in DeclareLocks
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()
    elif level == locking.LEVEL_NODE_RES:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = BuildInstanceHookEnvByObject(self, self.instance)
    env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    Pre hooks run only on the master; post hooks additionally run on the
    instance's (former) nodes.

    """
    nl = [self.cfg.GetMasterNode()]
    nl_post = list(self.instance.all_nodes) + nl
    return (nl, nl_post)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Remove the instance.

    """
    logging.info("Shutting down instance %s on node %s", self.instance.name,
                 self.cfg.GetNodeName(self.instance.primary_node))

    result = self.rpc.call_instance_shutdown(self.instance.primary_node,
                                             self.instance,
                                             self.op.shutdown_timeout,
                                             self.op.reason)
    # With ignore_failures a failed shutdown only warns; otherwise abort
    if self.op.ignore_failures:
      result.Warn("Warning: can't shutdown instance", feedback_fn)
    else:
      result.Raise("Could not shutdown instance %s on node %s" %
                   (self.instance.name,
                    self.cfg.GetNodeName(self.instance.primary_node)))

    # Sanity-check that we hold matching NODE/NODE_RES locks for all of
    # the instance's nodes before touching disks and config
    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES))
    assert not (set(self.instance.all_nodes) -
                self.owned_locks(locking.LEVEL_NODE)), \
      "Not owning correct locks"

    RemoveInstance(self, feedback_fn, self.instance, self.op.ignore_failures)
class LUInstanceMove(LogicalUnit):
  """Move an instance by data-copying.

  The instance is shut down on its current primary node, its disk
  contents are copied raw to newly created disks on the target node,
  and it is (optionally) restarted there.

  """
  HPATH = "instance-move"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    (self.op.target_node_uuid, self.op.target_node) = \
      ExpandNodeUuidAndName(self.cfg, self.op.target_node_uuid,
                            self.op.target_node)
    # The target node is locked here; the primary node is appended later
    # in DeclareLocks (LOCKS_APPEND)
    self.needed_locks[locking.LEVEL_NODE] = [self.op.target_node_uuid]
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes(primary_only=True)
    elif level == locking.LEVEL_NODE_RES:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and target nodes of the instance.

    """
    env = {
      "TARGET_NODE": self.op.target_node,
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
      }
    env.update(BuildInstanceHookEnvByObject(self, self.instance))
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    Master, current primary node and the move target node.

    """
    nl = [
      self.cfg.GetMasterNode(),
      self.instance.primary_node,
      self.op.target_node_uuid,
      ]
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    # Only templates whose data can be copied raw are supported
    if self.instance.disk_template not in constants.DTS_COPYABLE:
      raise errors.OpPrereqError("Disk template %s not suitable for copying" %
                                 self.instance.disk_template,
                                 errors.ECODE_STATE)

    target_node = self.cfg.GetNodeInfo(self.op.target_node_uuid)
    assert target_node is not None, \
      "Cannot retrieve locked node %s" % self.op.target_node

    self.target_node_uuid = target_node.uuid
    if target_node.uuid == self.instance.primary_node:
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
                                 (self.instance.name, target_node.name),
                                 errors.ECODE_STATE)

    cluster = self.cfg.GetClusterInfo()
    bep = cluster.FillBE(self.instance)

    # Every disk must be of a simple (copyable) device type
    for idx, dsk in enumerate(self.instance.disks):
      if dsk.dev_type not in (constants.DT_PLAIN, constants.DT_FILE,
                              constants.DT_SHARED_FILE, constants.DT_GLUSTER):
        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
                                   " cannot copy" % idx, errors.ECODE_STATE)

    CheckNodeOnline(self, target_node.uuid)
    CheckNodeNotDrained(self, target_node.uuid)
    CheckNodeVmCapable(self, target_node.uuid)
    group_info = self.cfg.GetNodeGroup(target_node.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
    CheckTargetNodeIPolicy(self, ipolicy, self.instance, target_node, self.cfg,
                           ignore=self.op.ignore_ipolicy)

    if self.instance.admin_state == constants.ADMINST_UP:
      # check memory requirements on the target node
      CheckNodeFreeMemory(
          self, target_node.uuid, "failing over instance %s" %
          self.instance.name, bep[constants.BE_MAXMEM],
          self.instance.hypervisor,
          cluster.hvparams[self.instance.hypervisor])
    else:
      self.LogInfo("Not checking memory on the secondary node as"
                   " instance will not be started")

    # check bridge existance
    CheckInstanceBridgesExist(self, self.instance, node_uuid=target_node.uuid)

  def Exec(self, feedback_fn):
    """Move an instance.

    The move is done by shutting it down on its present node, copying
    the data over (slow) and starting it on the new node.

    """
    source_node = self.cfg.GetNodeInfo(self.instance.primary_node)
    target_node = self.cfg.GetNodeInfo(self.target_node_uuid)

    self.LogInfo("Shutting down instance %s on source node %s",
                 self.instance.name, source_node.name)

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES))

    result = self.rpc.call_instance_shutdown(source_node.uuid, self.instance,
                                             self.op.shutdown_timeout,
                                             self.op.reason)
    # ignore_consistency turns a failed shutdown into a warning; otherwise
    # the move is aborted before any disks are touched
    if self.op.ignore_consistency:
      result.Warn("Could not shutdown instance %s on node %s. Proceeding"
                  " anyway. Please make sure node %s is down. Error details" %
                  (self.instance.name, source_node.name, source_node.name),
                  self.LogWarning)
    else:
      result.Raise("Could not shutdown instance %s on node %s" %
                   (self.instance.name, source_node.name))

    # create the target disks
    try:
      CreateDisks(self, self.instance, target_node_uuid=target_node.uuid)
    except errors.OpExecError:
      self.LogWarning("Device creation failed")
      self.cfg.ReleaseDRBDMinors(self.instance.uuid)
      raise

    errs = []
    transfers = []
    # activate, get path, create transfer jobs
    for idx, disk in enumerate(self.instance.disks):
      # FIXME: pass debug option from opcode to backend
      dt = masterd.instance.DiskTransfer("disk/%s" % idx,
                                         constants.IEIO_RAW_DISK,
                                         (disk, self.instance),
                                         constants.IEIO_RAW_DISK,
                                         (disk, self.instance),
                                         None)
      transfers.append(dt)

    import_result = \
      masterd.instance.TransferInstanceData(self, feedback_fn,
                                            source_node.uuid,
                                            target_node.uuid,
                                            target_node.secondary_ip,
                                            self.op.compress,
                                            self.instance, transfers)
    if not compat.all(import_result):
      errs.append("Failed to transfer instance data")

    if errs:
      # Roll back: remove the half-copied target disks, then abort.
      # Note: the raise is deliberately inside "finally" so the operation
      # fails even if RemoveDisks itself raises first.
      self.LogWarning("Some disks failed to copy, aborting")
      try:
        RemoveDisks(self, self.instance, target_node_uuid=target_node.uuid)
      finally:
        self.cfg.ReleaseDRBDMinors(self.instance.uuid)
        raise errors.OpExecError("Errors during disk copy: %s" %
                                 (",".join(errs),))

    # Point the instance at its new primary node and persist the change
    self.instance.primary_node = target_node.uuid
    self.cfg.Update(self.instance, feedback_fn)

    self.LogInfo("Removing the disks on the original node")
    RemoveDisks(self, self.instance, target_node_uuid=source_node.uuid)

    # Only start the instance if it's marked as up
    if self.instance.admin_state == constants.ADMINST_UP:
      self.LogInfo("Starting instance %s on node %s",
                   self.instance.name, target_node.name)

      disks_ok, _ = AssembleInstanceDisks(self, self.instance,
                                          ignore_secondaries=True)
      if not disks_ok:
        ShutdownInstanceDisks(self, self.instance)
        raise errors.OpExecError("Can't activate the instance's disks")

      result = self.rpc.call_instance_start(target_node.uuid,
                                            (self.instance, None, None), False,
                                            self.op.reason)
      msg = result.fail_msg
      if msg:
        ShutdownInstanceDisks(self, self.instance)
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
                                 (self.instance.name, target_node.name, msg))
class LUInstanceMultiAlloc(NoHooksLU):
  """Allocates multiple instances at the same time.

  Either an iallocator places all instances, or every instance comes with
  explicit pnode/snode; mixing the two is rejected. The result is a set
  of per-instance creation jobs plus the list of unallocatable instances.

  """
  REQ_BGL = False

  def CheckArguments(self):
    """Check arguments.

    """
    nodes = []
    for inst in self.op.instances:
      if inst.iallocator is not None:
        raise errors.OpPrereqError("iallocator are not allowed to be set on"
                                   " instance objects", errors.ECODE_INVAL)
      nodes.append(bool(inst.pnode))
      if inst.disk_template in constants.DTS_INT_MIRROR:
        nodes.append(bool(inst.snode))

    # Either all instances specify nodes or none do; a mix is invalid
    has_nodes = compat.any(nodes)
    if compat.all(nodes) ^ has_nodes:
      raise errors.OpPrereqError("There are instance objects providing"
                                 " pnode/snode while others do not",
                                 errors.ECODE_INVAL)

    if not has_nodes and self.op.iallocator is None:
      # Fall back to the cluster-wide default iallocator, if any
      default_iallocator = self.cfg.GetDefaultIAllocator()
      if default_iallocator:
        self.op.iallocator = default_iallocator
      else:
        raise errors.OpPrereqError("No iallocator or nodes on the instances"
                                   " given and no cluster-wide default"
                                   " iallocator found; please specify either"
                                   " an iallocator or nodes on the instances"
                                   " or set a cluster-wide default iallocator",
                                   errors.ECODE_INVAL)

    _CheckOpportunisticLocking(self.op)

    dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
    if dups:
      raise errors.OpPrereqError("There are duplicate instance names: %s" %
                                 utils.CommaJoin(dups), errors.ECODE_INVAL)

  def ExpandNames(self):
    """Calculate the locks.

    """
    self.share_locks = ShareAll()
    self.needed_locks = {
      # iallocator will select nodes and even if no iallocator is used,
      # collisions with LUInstanceCreate should be avoided
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
      }

    if self.op.iallocator:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
      self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET

      if self.op.opportunistic_locking:
        self.opportunistic_locks[locking.LEVEL_NODE] = True
    else:
      # Explicit node placement: lock exactly the named nodes
      nodeslist = []
      for inst in self.op.instances:
        (inst.pnode_uuid, inst.pnode) = \
          ExpandNodeUuidAndName(self.cfg, inst.pnode_uuid, inst.pnode)
        nodeslist.append(inst.pnode_uuid)
        if inst.snode is not None:
          (inst.snode_uuid, inst.snode) = \
            ExpandNodeUuidAndName(self.cfg, inst.snode_uuid, inst.snode)
          nodeslist.append(inst.snode_uuid)

      self.needed_locks[locking.LEVEL_NODE] = nodeslist
      # Lock resources of instance's primary and secondary nodes (copy to
      # prevent accidential modification)
      self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE_RES and \
      self.opportunistic_locks[locking.LEVEL_NODE]:
      # Even when using opportunistic locking, we require the same set of
      # NODE_RES locks as we got NODE locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        self.owned_locks(locking.LEVEL_NODE)

  def CheckPrereq(self):
    """Check prerequisite.

    """
    if self.op.iallocator:
      cluster = self.cfg.GetClusterInfo()
      default_vg = self.cfg.GetVGName()
      ec_id = self.proc.GetECId()

      if self.op.opportunistic_locking:
        # Only consider nodes for which a lock is held
        node_whitelist = self.cfg.GetNodeNames(
                           list(self.owned_locks(locking.LEVEL_NODE)))
      else:
        node_whitelist = None

      insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
                                           _ComputeNics(op, cluster, None,
                                                        self.cfg, ec_id),
                                           _ComputeFullBeParams(op, cluster),
                                           node_whitelist)
               for op in self.op.instances]

      req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
      ial = iallocator.IAllocator(self.cfg, self.rpc, req)

      ial.Run(self.op.iallocator)

      if not ial.success:
        raise errors.OpPrereqError("Can't compute nodes using"
                                   " iallocator '%s': %s" %
                                   (self.op.iallocator, ial.info),
                                   errors.ECODE_NORES)

      # (allocatable, failed) pair, consumed by Exec
      self.ia_result = ial.result

    if self.op.dry_run:
      self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
        constants.JOB_IDS_KEY: [],
        })

  def _ConstructPartialResult(self):
    """Contructs the partial result.

    @rtype: dict
    @return: mapping with the allocatable and failed instance names

    """
    if self.op.iallocator:
      (allocatable, failed_insts) = self.ia_result
      allocatable_insts = map(compat.fst, allocatable)
    else:
      allocatable_insts = [op.instance_name for op in self.op.instances]
      failed_insts = []

    return {
      constants.ALLOCATABLE_KEY: allocatable_insts,
      constants.FAILED_KEY: failed_insts,
      }

  def Exec(self, feedback_fn):
    """Executes the opcode.

    Turns every allocatable instance into a single-opcode job; with an
    iallocator the chosen nodes are filled in first.

    """
    jobs = []
    if self.op.iallocator:
      op2inst = dict((op.instance_name, op) for op in self.op.instances)
      (allocatable, failed) = self.ia_result

      for (name, node_names) in allocatable:
        op = op2inst.pop(name)

        (op.pnode_uuid, op.pnode) = \
          ExpandNodeUuidAndName(self.cfg, None, node_names[0])
        if len(node_names) > 1:
          (op.snode_uuid, op.snode) = \
            ExpandNodeUuidAndName(self.cfg, None, node_names[1])

        # Append unconditionally: instances allocated to a single node
        # (no secondary) must be submitted as jobs too; previously this
        # was nested under the snode branch and dropped them
        jobs.append([op])

      # Only after the loop has consumed all allocatable instances may
      # op2inst contain nothing but the failed ones
      missing = set(op2inst.keys()) - set(failed)
      assert not missing, \
        "Iallocator did return incomplete result: %s" % \
        utils.CommaJoin(missing)
    else:
      jobs.extend([op] for op in self.op.instances)

    return ResultWithJobs(jobs, **self._ConstructPartialResult())
class _InstNicModPrivate:
2116
  """Data structure for network interface modifications.
2117

2118
  Used by L{LUInstanceSetParams}.
2119

2120
  """
2121
  def __init__(self):
2122
    self.params = None
2123
    self.filled = None
2124

    
2125

    
2126
def _PrepareContainerMods(mods, private_fn):
2127
  """Prepares a list of container modifications by adding a private data field.
2128

2129
  @type mods: list of tuples; (operation, index, parameters)
2130
  @param mods: List of modifications
2131
  @type private_fn: callable or None
2132
  @param private_fn: Callable for constructing a private data field for a
2133
    modification
2134
  @rtype: list
2135

2136
  """
2137
  if private_fn is None:
2138
    fn = lambda: None
2139
  else:
2140
    fn = private_fn
2141

    
2142
  return [(op, idx, params, fn()) for (op, idx, params) in mods]
2143

    
2144

    
2145
def _CheckNodesPhysicalCPUs(lu, node_uuids, requested, hypervisor_specs):
  """Checks if nodes have enough physical CPUs

  This function checks if all given nodes have the needed number of
  physical CPUs. In case any node has less CPUs or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type node_uuids: C{list}
  @param node_uuids: the list of node UUIDs to check
  @type requested: C{int}
  @param requested: the minimum acceptable number of physical CPUs
  @type hypervisor_specs: list of pairs (string, dict of strings)
  @param hypervisor_specs: list of hypervisor specifications in
      pairs (hypervisor_name, hvparams)
  @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
      or we cannot check the node

  """
  # Query all nodes in a single RPC call, then examine each answer
  nodeinfo = lu.rpc.call_node_info(node_uuids, None, hypervisor_specs)
  for uuid in node_uuids:
    node_name = lu.cfg.GetNodeName(uuid)
    result = nodeinfo[uuid]
    result.Raise("Cannot get current information from node %s" % node_name,
                 prereq=True, ecode=errors.ECODE_ENVIRON)

    (_, _, (hv_info, )) = result.payload
    cpu_total = hv_info.get("cpu_total", None)
    if not isinstance(cpu_total, int):
      # A missing or malformed value means the node could not be checked
      raise errors.OpPrereqError("Can't compute the number of physical CPUs"
                                 " on node %s, result was '%s'" %
                                 (node_name, cpu_total), errors.ECODE_ENVIRON)
    if requested > cpu_total:
      raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
                                 "required" % (node_name, cpu_total, requested),
                                 errors.ECODE_NORES)
2182

    
2183

    
2184
def GetItemFromContainer(identifier, kind, container):
  """Return the item refered by the identifier.

  @type identifier: string
  @param identifier: Item index or name or UUID
  @type kind: string
  @param kind: One-word item description
  @type container: list
  @param container: Container to get the item from
  @rtype: tuple
  @return: (absolute index, item)
  @raise IndexError: if a numeric identifier is out of range
  @raise errors.OpPrereqError: if no item matches a non-numeric identifier

  """
  # Index
  try:
    idx = int(identifier)
    if idx == -1:
      # -1 refers to the last item (mirrors the "append" convention used
      # when adding items)
      absidx = len(container) - 1
    elif idx < 0:
      raise IndexError("Not accepting negative indices other than -1")
    elif idx >= len(container):
      # FIX: was "idx > len(container)", which let an exact off-the-end
      # index fall through and raise a bare "list index out of range"
      # instead of this descriptive message
      raise IndexError("Got %s index %s, but there are only %s" %
                       (kind, idx, len(container)))
    else:
      absidx = idx
    # Index with the computed absolute index (equivalent for all valid
    # cases, and consistent with the value we return)
    return (absidx, container[absidx])
  except ValueError:
    # Not numeric; fall through to name/UUID lookup
    pass

  for idx, item in enumerate(container):
    if item.uuid == identifier or item.name == identifier:
      return (idx, item)

  raise errors.OpPrereqError("Cannot find %s with identifier %s" %
                             (kind, identifier), errors.ECODE_NOENT)
2218

    
2219

    
2220
def _ApplyContainerMods(kind, container, chgdesc, mods,
                        create_fn, modify_fn, remove_fn,
                        post_add_fn=None):
  """Applies descriptions in C{mods} to C{container}.

  @type kind: string
  @param kind: One-word item description
  @type container: list
  @param container: Container to modify
  @type chgdesc: None or list
  @param chgdesc: List of applied changes
  @type mods: list
  @param mods: Modifications as returned by L{_PrepareContainerMods}
  @type create_fn: callable
  @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
    receives absolute item index, parameters and private data object as added
    by L{_PrepareContainerMods}, returns tuple containing new item and changes
    as list
  @type modify_fn: callable
  @param modify_fn: Callback for modifying an existing item
    (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
    and private data object as added by L{_PrepareContainerMods}, returns
    changes as list
  @type remove_fn: callable
  @param remove_fn: Callback on removing item; receives absolute item index,
    item and private data object as added by L{_PrepareContainerMods}
  @type post_add_fn: callable
  @param post_add_fn: Callable for post-processing a newly created item after
    it has been put into the container. It receives the index of the new item
    and the new item as parameters.
  @raise errors.OpPrereqError: if an add operation uses a non-integer
    identifier
  @raise IndexError: if a numeric index is out of range

  """
  for (op, identifier, params, private) in mods:
    changes = None

    if op == constants.DDM_ADD:
      # Calculate where item will be added
      # When adding an item, identifier can only be an index
      try:
        idx = int(identifier)
      except ValueError:
        # FIX: corrected message typo "possitive" -> "positive"
        raise errors.OpPrereqError("Only positive integer or -1 is accepted as"
                                   " identifier for %s" % constants.DDM_ADD,
                                   errors.ECODE_INVAL)
      if idx == -1:
        # -1 means "append at the end"
        addidx = len(container)
      else:
        if idx < 0:
          raise IndexError("Not accepting negative indices other than -1")
        elif idx > len(container):
          raise IndexError("Got %s index %s, but there are only %s" %
                           (kind, idx, len(container)))
        addidx = idx

      if create_fn is None:
        item = params
      else:
        (item, changes) = create_fn(addidx, params, private)

      if idx == -1:
        container.append(item)
      else:
        assert idx >= 0
        assert idx <= len(container)
        # list.insert does so before the specified index
        container.insert(idx, item)

      if post_add_fn is not None:
        post_add_fn(addidx, item)

    else:
      # Retrieve existing item
      (absidx, item) = GetItemFromContainer(identifier, kind, container)

      if op == constants.DDM_REMOVE:
        assert not params

        changes = [("%s/%s" % (kind, absidx), "remove")]

        if remove_fn is not None:
          msg = remove_fn(absidx, item, private)
          if msg:
            changes.append(("%s/%s" % (kind, absidx), msg))

        assert container[absidx] == item
        del container[absidx]
      elif op == constants.DDM_MODIFY:
        if modify_fn is not None:
          changes = modify_fn(absidx, item, params, private)
      else:
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)

    assert _TApplyContModsCbChanges(changes)

    if not (chgdesc is None or changes is None):
      chgdesc.extend(changes)
2316

    
2317

    
2318
def _UpdateIvNames(base_index, disks):
2319
  """Updates the C{iv_name} attribute of disks.
2320

2321
  @type disks: list of L{objects.Disk}
2322

2323
  """
2324
  for (idx, disk) in enumerate(disks):
2325
    disk.iv_name = "disk/%s" % (base_index + idx, )
2326

    
2327

    
2328
class LUInstanceSetParams(LogicalUnit):
2329
  """Modifies an instances's parameters.
2330

2331
  """
2332
  HPATH = "instance-modify"
2333
  HTYPE = constants.HTYPE_INSTANCE
2334
  REQ_BGL = False
2335

    
2336
  @staticmethod
  def _UpgradeDiskNicMods(kind, mods, verify_fn):
    """Upgrades legacy two-element disk/NIC modifications.

    Legacy modifications come as (op, params) pairs and are translated
    into the three-element (op, identifier, params) format; new-style
    modifications are returned unchanged.

    """
    assert ht.TList(mods)
    assert not mods or len(mods[0]) in (2, 3)

    if not (mods and len(mods[0]) == 2):
      # Already in the three-element format, nothing to upgrade
      return mods

    upgraded = []
    num_addremove = 0
    for op, params in mods:
      if op in (constants.DDM_ADD, constants.DDM_REMOVE):
        # Legacy add/remove always targets index -1 (append/last)
        upgraded.append((op, -1, params))
        num_addremove += 1

        if num_addremove > 1:
          raise errors.OpPrereqError("Only one %s add or remove operation is"
                                     " supported at a time" % kind,
                                     errors.ECODE_INVAL)
      else:
        # In the legacy format anything else is an identifier to modify
        upgraded.append((constants.DDM_MODIFY, op, params))

    assert verify_fn(upgraded)
    return upgraded
2362

    
2363
  @staticmethod
  def _CheckMods(kind, mods, key_types, item_fn):
    """Ensures requested disk/NIC modifications are valid.

    """
    for (action, _, params) in mods:
      assert ht.TDict(params)

      # An empty 'key_types' dict means an 'ext' template, for which
      # arbitrary parameters are allowed, so no type forcing is done
      if key_types:
        utils.ForceDictType(params, key_types)

      if action in (constants.DDM_ADD, constants.DDM_MODIFY):
        item_fn(action, params)
      elif action == constants.DDM_REMOVE:
        if params:
          raise errors.OpPrereqError("No settings should be passed when"
                                     " removing a %s" % kind,
                                     errors.ECODE_INVAL)
      else:
        raise errors.ProgrammerError("Unhandled operation '%s'" % action)
2385

    
2386
  def _VerifyDiskModification(self, op, params, excl_stor):
    """Verifies a disk modification.

    Normalizes C{params} in place: defaults the access mode on add,
    coerces the size to an integer, and maps a literal "none" name to
    C{None}.

    @param op: one of L{constants.DDM_ADD} / L{constants.DDM_MODIFY}
    @param params: disk parameters of the modification (mutated in place)
    @param excl_stor: whether exclusive storage is in use on any node
    @raise errors.OpPrereqError: on invalid mode, missing size, or a size
      change on modify

    """
    if op == constants.DDM_ADD:
      # Default to read-write access if no mode was given
      mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
      if mode not in constants.DISK_ACCESS_SET:
        raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
                                   errors.ECODE_INVAL)

      size = params.get(constants.IDISK_SIZE, None)
      if size is None:
        raise errors.OpPrereqError("Required disk parameter '%s' missing" %
                                   constants.IDISK_SIZE, errors.ECODE_INVAL)
      size = int(size)

      params[constants.IDISK_SIZE] = size
      name = params.get(constants.IDISK_NAME, None)
      if name is not None and name.lower() == constants.VALUE_NONE:
        # The literal string "none" requests unsetting the name
        params[constants.IDISK_NAME] = None

      CheckSpindlesExclusiveStorage(params, excl_stor, True)

    elif op == constants.DDM_MODIFY:
      if constants.IDISK_SIZE in params:
        raise errors.OpPrereqError("Disk size change not possible, use"
                                   " grow-disk", errors.ECODE_INVAL)

      # Disk modification supports changing only the disk name and mode.
      # Changing arbitrary parameters is allowed only for ext disk template",
      if self.instance.disk_template != constants.DT_EXT:
        utils.ForceDictType(params, constants.MODIFIABLE_IDISK_PARAMS_TYPES)

      name = params.get(constants.IDISK_NAME, None)
      if name is not None and name.lower() == constants.VALUE_NONE:
        params[constants.IDISK_NAME] = None
2422

    
2423
  @staticmethod
  def _VerifyNicModification(op, params):
    """Verifies a network interface modification.

    Normalizes C{params} in place: the literal string "none" maps name,
    network and IP to C{None}; adds without a MAC default to "auto".

    @param op: one of L{constants.DDM_ADD} / L{constants.DDM_MODIFY}
    @param params: NIC parameters of the modification (mutated in place)
    @raise errors.OpPrereqError: on contradictory or invalid settings

    """
    if op in (constants.DDM_ADD, constants.DDM_MODIFY):
      ip = params.get(constants.INIC_IP, None)
      name = params.get(constants.INIC_NAME, None)
      req_net = params.get(constants.INIC_NETWORK, None)
      link = params.get(constants.NIC_LINK, None)
      mode = params.get(constants.NIC_MODE, None)
      if name is not None and name.lower() == constants.VALUE_NONE:
        params[constants.INIC_NAME] = None
      if req_net is not None:
        if req_net.lower() == constants.VALUE_NONE:
          params[constants.INIC_NETWORK] = None
          req_net = None
        elif link is not None or mode is not None:
          # A network determines link/mode itself, so specifying both
          # is contradictory
          raise errors.OpPrereqError("If network is given"
                                     " mode or link should not",
                                     errors.ECODE_INVAL)

      if op == constants.DDM_ADD:
        macaddr = params.get(constants.INIC_MAC, None)
        if macaddr is None:
          # New NICs without an explicit MAC get an auto-generated one
          params[constants.INIC_MAC] = constants.VALUE_AUTO

      if ip is not None:
        if ip.lower() == constants.VALUE_NONE:
          params[constants.INIC_IP] = None
        else:
          if ip.lower() == constants.NIC_IP_POOL:
            if op == constants.DDM_ADD and req_net is None:
              # "pool" asks for an address from a network's pool, so a
              # network must be given
              raise errors.OpPrereqError("If ip=pool, parameter network"
                                         " cannot be none",
                                         errors.ECODE_INVAL)
          else:
            if not netutils.IPAddress.IsValid(ip):
              raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                         errors.ECODE_INVAL)

      if constants.INIC_MAC in params:
        macaddr = params[constants.INIC_MAC]
        if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
          macaddr = utils.NormalizeAndValidateMac(macaddr)

        if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
                                     " modifying an existing NIC",
                                     errors.ECODE_INVAL)
2473

    
2474
  def CheckArguments(self):
    """Checks the opcode arguments without cluster state.

    Upgrades legacy disk/NIC modification formats, verifies NIC
    modifications and rejects unsupported argument combinations.

    @raise errors.OpPrereqError: if no changes were submitted or the
      combination of arguments is invalid

    """
    if not (self.op.nics or self.op.disks or self.op.disk_template or
            self.op.hvparams or self.op.beparams or self.op.os_name or
            self.op.osparams or self.op.offline is not None or
            self.op.runtime_mem or self.op.pnode):
      raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)

    if self.op.hvparams:
      # Globals may only be changed at cluster level
      CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
                           "hypervisor", "instance", "cluster")

    # Accept both the legacy 2-tuple and the current 3-tuple formats
    self.op.disks = self._UpgradeDiskNicMods(
      "disk", self.op.disks, ht.TSetParamsMods(ht.TIDiskParams))
    self.op.nics = self._UpgradeDiskNicMods(
      "NIC", self.op.nics, ht.TSetParamsMods(ht.TINicParams))

    if self.op.disks and self.op.disk_template is not None:
      raise errors.OpPrereqError("Disk template conversion and other disk"
                                 " changes not supported at the same time",
                                 errors.ECODE_INVAL)

    if (self.op.disk_template and
        self.op.disk_template in constants.DTS_INT_MIRROR and
        self.op.remote_node is None):
      raise errors.OpPrereqError("Changing the disk template to a mirrored"
                                 " one requires specifying a secondary node",
                                 errors.ECODE_INVAL)

    # Check NIC modifications
    self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
                    self._VerifyNicModification)

    if self.op.pnode:
      (self.op.pnode_uuid, self.op.pnode) = \
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
2509

    
2510
  def ExpandNames(self):
    """Declares the locks needed for modifying the instance.

    Node and node-resource locks are filled in later by
    L{DeclareLocks}; node group locks are shared.

    """
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODEGROUP] = []
    # Can't even acquire node locks in shared mode as upcoming changes in
    # Ganeti 2.6 will start to modify the node object on disk conversion
    self.needed_locks[locking.LEVEL_NODE] = []
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
    # Look node group to look up the ipolicy
    self.share_locks[locking.LEVEL_NODEGROUP] = 1
2520

    
2521
  def DeclareLocks(self, level):
    """Computes per-level lock lists.

    @param level: locking level being processed

    """
    if level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
      # Acquire locks for the instance's nodegroups optimistically. Needs
      # to be verified in CheckPrereq
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
        self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
    elif level == locking.LEVEL_NODE:
      self._LockInstancesNodes()
      if self.op.disk_template and self.op.remote_node:
        # Converting to a mirrored template also needs a lock on the new
        # secondary node
        (self.op.remote_node_uuid, self.op.remote_node) = \
          ExpandNodeUuidAndName(self.cfg, self.op.remote_node_uuid,
                                self.op.remote_node)
        self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node_uuid)
    elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
2539

    
2540
  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, primary and secondaries.

    @rtype: dict
    @return: hook environment with the requested backend, NIC, disk
      template and runtime memory changes

    """
    args = {}
    if constants.BE_MINMEM in self.be_new:
      args["minmem"] = self.be_new[constants.BE_MINMEM]
    if constants.BE_MAXMEM in self.be_new:
      args["maxmem"] = self.be_new[constants.BE_MAXMEM]
    if constants.BE_VCPUS in self.be_new:
      args["vcpus"] = self.be_new[constants.BE_VCPUS]
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
    # information at all.

    if self._new_nics is not None:
      nics = []

      for nic in self._new_nics:
        # Work on a deep copy so filling in cluster defaults does not
        # modify the stored NIC objects
        n = copy.deepcopy(nic)
        nicparams = self.cluster.SimpleFillNIC(n.nicparams)
        n.nicparams = nicparams
        nics.append(NICToTuple(self, n))

      args["nics"] = nics

    env = BuildInstanceHookEnvByObject(self, self.instance, override=args)
    if self.op.disk_template:
      env["NEW_DISK_TEMPLATE"] = self.op.disk_template
    if self.op.runtime_mem:
      env["RUNTIME_MEMORY"] = self.op.runtime_mem

    return env
2574

    
2575
  def BuildHooksNodes(self):
    """Build hooks nodes.

    Hooks run on the master node plus all of the instance's nodes.

    """
    node_list = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (node_list, node_list)
2581

    
2582
  def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
                              old_params, cluster, pnode_uuid):
    """Prepares and validates a single NIC modification.

    Computes the new (filled) NIC parameters, checks bridges/modes,
    handles MAC generation/reservation and IP reservation/release, and
    stores the results in C{private.params} / C{private.filled}.

    @param params: requested NIC parameter changes (mutated in place for
      generated MAC/IP values)
    @param private: L{_InstNicModPrivate} receiving the computed params
    @param old_ip: current IP of the NIC (or None)
    @param old_net_uuid: current network UUID of the NIC (or None)
    @param old_params: current nicparams of the NIC
    @param cluster: cluster configuration object
    @param pnode_uuid: UUID of the instance's primary node

    """
    update_params_dict = dict([(key, params[key])
                               for key in constants.NICS_PARAMETERS
                               if key in params])

    req_link = update_params_dict.get(constants.NIC_LINK, None)
    req_mode = update_params_dict.get(constants.NIC_MODE, None)

    new_net_uuid = None
    new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid)
    if new_net_uuid_or_name:
      new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name)
      new_net_obj = self.cfg.GetNetwork(new_net_uuid)

    if old_net_uuid:
      old_net_obj = self.cfg.GetNetwork(old_net_uuid)

    if new_net_uuid:
      # When connected to a network, link/mode come from the netparams of
      # the node's group, not from the request
      netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode_uuid)
      if not netparams:
        raise errors.OpPrereqError("No netparams found for the network"
                                   " %s, probably not connected" %
                                   new_net_obj.name, errors.ECODE_INVAL)
      new_params = dict(netparams)
    else:
      new_params = GetUpdatedParams(old_params, update_params_dict)

    utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)

    new_filled_params = cluster.SimpleFillNIC(new_params)
    objects.NIC.CheckParameterSyntax(new_filled_params)

    new_mode = new_filled_params[constants.NIC_MODE]
    if new_mode == constants.NIC_MODE_BRIDGED:
      # Verify the target bridge exists on the primary node; with --force
      # this is only a warning
      bridge = new_filled_params[constants.NIC_LINK]
      msg = self.rpc.call_bridges_exist(pnode_uuid, [bridge]).fail_msg
      if msg:
        msg = "Error checking bridges on node '%s': %s" % \
                (self.cfg.GetNodeName(pnode_uuid), msg)
        if self.op.force:
          self.warn.append(msg)
        else:
          raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)

    elif new_mode == constants.NIC_MODE_ROUTED:
      ip = params.get(constants.INIC_IP, old_ip)
      if ip is None:
        raise errors.OpPrereqError("Cannot set the NIC IP address to None"
                                   " on a routed NIC", errors.ECODE_INVAL)

    elif new_mode == constants.NIC_MODE_OVS:
      # TODO: check OVS link
      self.LogInfo("OVS links are currently not checked for correctness")

    if constants.INIC_MAC in params:
      mac = params[constants.INIC_MAC]
      if mac is None:
        raise errors.OpPrereqError("Cannot unset the NIC MAC address",
                                   errors.ECODE_INVAL)
      elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        # otherwise generate the MAC address
        params[constants.INIC_MAC] = \
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
      else:
        # or validate/reserve the current one
        try:
          self.cfg.ReserveMAC(mac, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("MAC address '%s' already in use"
                                     " in cluster" % mac,
                                     errors.ECODE_NOTUNIQUE)
    elif new_net_uuid != old_net_uuid:
      # No MAC requested but the network changed: regenerate the MAC if
      # the MAC prefix of the new network differs from the old one

      def get_net_prefix(net_uuid):
        mac_prefix = None
        if net_uuid:
          nobj = self.cfg.GetNetwork(net_uuid)
          mac_prefix = nobj.mac_prefix

        return mac_prefix

      new_prefix = get_net_prefix(new_net_uuid)
      old_prefix = get_net_prefix(old_net_uuid)
      if old_prefix != new_prefix:
        params[constants.INIC_MAC] = \
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())

    # if there is a change in (ip, network) tuple
    new_ip = params.get(constants.INIC_IP, old_ip)
    if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
      if new_ip:
        # if IP is pool then require a network and generate one IP
        if new_ip.lower() == constants.NIC_IP_POOL:
          if new_net_uuid:
            try:
              new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("Unable to get a free IP"
                                         " from the address pool",
                                         errors.ECODE_STATE)
            self.LogInfo("Chose IP %s from network %s",
                         new_ip,
                         new_net_obj.name)
            params[constants.INIC_IP] = new_ip
          else:
            raise errors.OpPrereqError("ip=pool, but no network found",
                                       errors.ECODE_INVAL)
        # Reserve new IP if in the new network if any
        elif new_net_uuid:
          try:
            self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId(),
                               check=self.op.conflicts_check)
            self.LogInfo("Reserving IP %s in network %s",
                         new_ip, new_net_obj.name)
          except errors.ReservationError:
            raise errors.OpPrereqError("IP %s not available in network %s" %
                                       (new_ip, new_net_obj.name),
                                       errors.ECODE_NOTUNIQUE)
        # new network is None so check if new IP is a conflicting IP
        elif self.op.conflicts_check:
          _CheckForConflictingIp(self, new_ip, pnode_uuid)

      # release old IP if old network is not None
      if old_ip and old_net_uuid:
        try:
          self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId())
        except errors.AddressPoolError:
          logging.warning("Release IP %s not contained in network %s",
                          old_ip, old_net_obj.name)

    # there are no changes in (ip, network) tuple and old network is not None
    elif (old_net_uuid is not None and
          (req_link is not None or req_mode is not None)):
      raise errors.OpPrereqError("Not allowed to change link or mode of"
                                 " a NIC that is connected to a network",
                                 errors.ECODE_INVAL)

    private.params = new_params
    private.filled = new_filled_params
2723

    
2724
  def _PreCheckDiskTemplate(self, pnode_info):
    """CheckPrereq checks related to a new disk template.

    @param pnode_info: node object of the instance's primary node
    @raise errors.OpPrereqError: if the requested conversion is not
      possible in the current cluster/instance state

    """
    # Arguments are passed to avoid configuration lookups
    pnode_uuid = self.instance.primary_node
    if self.instance.disk_template == self.op.disk_template:
      raise errors.OpPrereqError("Instance already has disk template %s" %
                                 self.instance.disk_template,
                                 errors.ECODE_INVAL)

    if not self.cluster.IsDiskTemplateEnabled(self.op.disk_template):
      raise errors.OpPrereqError("Disk template '%s' is not enabled for this"
                                 " cluster." % self.op.disk_template)

    if (self.instance.disk_template,
        self.op.disk_template) not in self._DISK_CONVERSIONS:
      raise errors.OpPrereqError("Unsupported disk template conversion from"
                                 " %s to %s" % (self.instance.disk_template,
                                                self.op.disk_template),
                                 errors.ECODE_INVAL)
    CheckInstanceState(self, self.instance, INSTANCE_DOWN,
                       msg="cannot change disk template")
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.remote_node_uuid == pnode_uuid:
        raise errors.OpPrereqError("Given new secondary node %s is the same"
                                   " as the primary node of the instance" %
                                   self.op.remote_node, errors.ECODE_STATE)
      CheckNodeOnline(self, self.op.remote_node_uuid)
      CheckNodeNotDrained(self, self.op.remote_node_uuid)
      # FIXME: here we assume that the old instance type is DT_PLAIN
      assert self.instance.disk_template == constants.DT_PLAIN
      disks = [{constants.IDISK_SIZE: d.size,
                constants.IDISK_VG: d.logical_id[0]}
               for d in self.instance.disks]
      required = ComputeDiskSizePerVG(self.op.disk_template, disks)
      CheckNodesFreeDiskPerVG(self, [self.op.remote_node_uuid], required)

      # Verify the new secondary node against the target group's ipolicy
      snode_info = self.cfg.GetNodeInfo(self.op.remote_node_uuid)
      snode_group = self.cfg.GetNodeGroup(snode_info.group)
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
                                                              snode_group)
      CheckTargetNodeIPolicy(self, ipolicy, self.instance, snode_info, self.cfg,
                             ignore=self.op.ignore_ipolicy)
      if pnode_info.group != snode_info.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    if not self.op.disk_template in constants.DTS_EXCL_STORAGE:
      # Make sure none of the nodes require exclusive storage
      nodes = [pnode_info]
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        # snode_info was set in the mirrored-template branch above, which
        # is guarded by the same condition
        assert snode_info
        nodes.append(snode_info)
      has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
      if compat.any(map(has_es, nodes)):
        errmsg = ("Cannot convert disk template from %s to %s when exclusive"
                  " storage is enabled" % (self.instance.disk_template,
                                           self.op.disk_template))
        raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)
2784

    
2785
  def _PreCheckDisks(self, ispec):
    """CheckPrereq checks related to disk changes.

    Validates the requested disk modifications against the instance's
    disk template, the cluster's exclusive-storage setting and general
    limits, and records the resulting disk count/sizes in C{ispec}.

    @type ispec: dict
    @param ispec: instance specs to be updated with the new disks

    """
    self.diskparams = self.cfg.GetInstanceDiskParams(self.instance)

    # exclusive storage is relevant if it is enabled on at least one of the
    # instance's nodes
    excl_stor = compat.any(
      rpc.GetExclusiveStorageForNodes(self.cfg,
                                      self.instance.all_nodes).values()
      )

    # Check disk modifications. This is done here and not in CheckArguments
    # (as with NICs), because we need to know the instance's disk template
    ver_fn = lambda op, par: self._VerifyDiskModification(op, par, excl_stor)
    if self.instance.disk_template == constants.DT_EXT:
      # ext disks accept arbitrary provider parameters, so no type checking
      self._CheckMods("disk", self.op.disks, {}, ver_fn)
    else:
      self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
                      ver_fn)

    self.diskmod = _PrepareContainerMods(self.op.disks, None)

    # Check the validity of the `provider' parameter: mandatory when adding a
    # disk to an ext-template instance, forbidden everywhere else.  Note the
    # "==" comparison: DT_EXT is a plain string, so "in" would be an
    # accidental substring test.
    if self.instance.disk_template == constants.DT_EXT:
      for mod in self.diskmod:
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
        if mod[0] == constants.DDM_ADD:
          if ext_provider is None:
            raise errors.OpPrereqError("Instance template is '%s' and parameter"
                                       " '%s' missing, during disk add" %
                                       (constants.DT_EXT,
                                        constants.IDISK_PROVIDER),
                                       errors.ECODE_NOENT)
        elif mod[0] == constants.DDM_MODIFY:
          if ext_provider:
            raise errors.OpPrereqError("Parameter '%s' is invalid during disk"
                                       " modification" %
                                       constants.IDISK_PROVIDER,
                                       errors.ECODE_INVAL)
    else:
      for mod in self.diskmod:
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
        if ext_provider is not None:
          raise errors.OpPrereqError("Parameter '%s' is only valid for"
                                     " instances of type '%s'" %
                                     (constants.IDISK_PROVIDER,
                                      constants.DT_EXT),
                                     errors.ECODE_INVAL)

    # Adding a disk without waiting for sync would leave it degraded while
    # the instance's other disks are active
    if not self.op.wait_for_sync and self.instance.disks_active:
      for mod in self.diskmod:
        if mod[0] == constants.DDM_ADD:
          raise errors.OpPrereqError("Can't add a disk to an instance with"
                                     " activated disks and"
                                     " --no-wait-for-sync given.",
                                     errors.ECODE_INVAL)

    if self.op.disks and self.instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Disk operations not supported for"
                                 " diskless instances", errors.ECODE_INVAL)

    def _PrepareDiskMod(_, disk, params, __):
      # only the name needs pre-computing; everything else is applied in Exec
      disk.name = params.get(constants.IDISK_NAME, None)

    # Verify disk changes (operating on a copy)
    disks = copy.deepcopy(self.instance.disks)
    _ApplyContainerMods("disk", disks, None, self.diskmod, None,
                        _PrepareDiskMod, None)
    utils.ValidateDeviceNames("disk", disks)
    if len(disks) > constants.MAX_DISKS:
      raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
                                 " more" % constants.MAX_DISKS,
                                 errors.ECODE_STATE)
    disk_sizes = [disk.size for disk in self.instance.disks]
    disk_sizes.extend(params["size"] for (op, idx, params, private) in
                      self.diskmod if op == constants.DDM_ADD)
    ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
    ispec[constants.ISPEC_DISK_SIZE] = disk_sizes

    if self.op.offline is not None and self.op.offline:
      CheckInstanceState(self, self.instance, CAN_CHANGE_INSTANCE_OFFLINE,
                         msg="can't change to offline")

  def CheckPrereq(self):
    """Check prerequisites.

    Validates all requested instance modifications (disks, NICs, OS,
    hypervisor/backend/OS parameters, runtime memory, offline state)
    against the current cluster state and the instance policy, and
    precomputes the data the Exec phase needs.

    """
    assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    self.cluster = self.cfg.GetClusterInfo()
    cluster_hvparams = self.cluster.hvparams[self.instance.hypervisor]

    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    pnode_uuid = self.instance.primary_node

    # collected non-fatal problems, reported to the user in the hooks phase
    self.warn = []

    if (self.op.pnode_uuid is not None and self.op.pnode_uuid != pnode_uuid and
        not self.op.force):
      # verify that the instance is not up
      instance_info = self.rpc.call_instance_info(
          pnode_uuid, self.instance.name, self.instance.hypervisor,
          cluster_hvparams)
      if instance_info.fail_msg:
        self.warn.append("Can't get instance runtime information: %s" %
                         instance_info.fail_msg)
      elif instance_info.payload:
        raise errors.OpPrereqError("Instance is still running on %s" %
                                   self.cfg.GetNodeName(pnode_uuid),
                                   errors.ECODE_STATE)

    assert pnode_uuid in self.owned_locks(locking.LEVEL_NODE)
    node_uuids = list(self.instance.all_nodes)
    pnode_info = self.cfg.GetNodeInfo(pnode_uuid)

    #_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
    assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
    group_info = self.cfg.GetNodeGroup(pnode_info.group)

    # dictionary with instance information after the modification
    ispec = {}

    # hotplug is only attempted if the node's hypervisor supports it;
    # with hotplug_if_possible we silently fall back to a cold modification
    if self.op.hotplug or self.op.hotplug_if_possible:
      result = self.rpc.call_hotplug_supported(self.instance.primary_node,
                                               self.instance)
      if result.fail_msg:
        if self.op.hotplug:
          result.Raise("Hotplug is not possible: %s" % result.fail_msg,
                       prereq=True)
        else:
          self.LogWarning(result.fail_msg)
          self.op.hotplug = False
          self.LogInfo("Modification will take place without hotplugging.")
      else:
        self.op.hotplug = True

    # Prepare NIC modifications
    self.nicmod = _PrepareContainerMods(self.op.nics, _InstNicModPrivate)

    # OS change
    if self.op.os_name and not self.op.force:
      CheckNodeHasOS(self, self.instance.primary_node, self.op.os_name,
                     self.op.force_variant)
      instance_os = self.op.os_name
    else:
      instance_os = self.instance.os

    assert not (self.op.disk_template and self.op.disks), \
      "Can't modify disk template and apply disk changes at the same time"

    if self.op.disk_template:
      self._PreCheckDiskTemplate(pnode_info)

    self._PreCheckDisks(ispec)

    # hvparams processing
    if self.op.hvparams:
      hv_type = self.instance.hypervisor
      i_hvdict = GetUpdatedParams(self.instance.hvparams, self.op.hvparams)
      utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
      hv_new = self.cluster.SimpleFillHV(hv_type, self.instance.os, i_hvdict)

      # local check
      hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
      CheckHVParams(self, node_uuids, self.instance.hypervisor, hv_new)
      self.hv_proposed = self.hv_new = hv_new # the new actual values
      self.hv_inst = i_hvdict # the new dict (without defaults)
    else:
      self.hv_proposed = self.cluster.SimpleFillHV(self.instance.hypervisor,
                                                   self.instance.os,
                                                   self.instance.hvparams)
      self.hv_new = self.hv_inst = {}

    # beparams processing
    if self.op.beparams:
      i_bedict = GetUpdatedParams(self.instance.beparams, self.op.beparams,
                                  use_none=True)
      objects.UpgradeBeParams(i_bedict)
      utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
      be_new = self.cluster.SimpleFillBE(i_bedict)
      self.be_proposed = self.be_new = be_new # the new actual values
      self.be_inst = i_bedict # the new dict (without defaults)
    else:
      self.be_new = self.be_inst = {}
      self.be_proposed = self.cluster.SimpleFillBE(self.instance.beparams)
    be_old = self.cluster.FillBE(self.instance)

    # CPU param validation -- checking every time a parameter is
    # changed to cover all cases where either CPU mask or vcpus have
    # changed
    if (constants.BE_VCPUS in self.be_proposed and
        constants.HV_CPU_MASK in self.hv_proposed):
      cpu_list = \
        utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
      # Verify mask is consistent with number of vCPUs. Can skip this
      # test if only 1 entry in the CPU mask, which means same mask
      # is applied to all vCPUs.
      if (len(cpu_list) > 1 and
          len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
        raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
                                   " CPU mask [%s]" %
                                   (self.be_proposed[constants.BE_VCPUS],
                                    self.hv_proposed[constants.HV_CPU_MASK]),
                                   errors.ECODE_INVAL)

      # Only perform this test if a new CPU mask is given
      if constants.HV_CPU_MASK in self.hv_new:
        # Calculate the largest CPU number requested
        max_requested_cpu = max(map(max, cpu_list))
        # Check that all of the instance's nodes have enough physical CPUs to
        # satisfy the requested CPU mask
        hvspecs = [(self.instance.hypervisor,
                    self.cfg.GetClusterInfo()
                      .hvparams[self.instance.hypervisor])]
        _CheckNodesPhysicalCPUs(self, self.instance.all_nodes,
                                max_requested_cpu + 1,
                                hvspecs)

    # osparams processing
    if self.op.osparams:
      i_osdict = GetUpdatedParams(self.instance.osparams, self.op.osparams)
      CheckOSParams(self, True, node_uuids, instance_os, i_osdict)
      self.os_inst = i_osdict # the new dict (without defaults)
    else:
      self.os_inst = {}

    #TODO(dynmem): do the appropriate check involving MINMEM
    if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
        be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
      mem_check_list = [pnode_uuid]
      if be_new[constants.BE_AUTO_BALANCE]:
        # either we changed auto_balance to yes or it was from before
        mem_check_list.extend(self.instance.secondary_nodes)
      instance_info = self.rpc.call_instance_info(
          pnode_uuid, self.instance.name, self.instance.hypervisor,
          cluster_hvparams)
      hvspecs = [(self.instance.hypervisor,
                  cluster_hvparams)]
      nodeinfo = self.rpc.call_node_info(mem_check_list, None,
                                         hvspecs)
      pninfo = nodeinfo[pnode_uuid]
      msg = pninfo.fail_msg
      if msg:
        # Assume the primary node is unreachable and go ahead
        self.warn.append("Can't get info from primary node %s: %s" %
                         (self.cfg.GetNodeName(pnode_uuid), msg))
      else:
        (_, _, (pnhvinfo, )) = pninfo.payload
        if not isinstance(pnhvinfo.get("memory_free", None), int):
          self.warn.append("Node data from primary node %s doesn't contain"
                           " free memory information" %
                           self.cfg.GetNodeName(pnode_uuid))
        elif instance_info.fail_msg:
          self.warn.append("Can't get instance runtime information: %s" %
                           instance_info.fail_msg)
        else:
          if instance_info.payload:
            current_mem = int(instance_info.payload["memory"])
          else:
            # Assume instance not running
            # (there is a slight race condition here, but it's not very
            # probable, and we have no other way to check)
            # TODO: Describe race condition
            current_mem = 0
          #TODO(dynmem): do the appropriate check involving MINMEM
          miss_mem = (be_new[constants.BE_MAXMEM] - current_mem -
                      pnhvinfo["memory_free"])
          if miss_mem > 0:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from starting, due to %d MB of memory"
                                       " missing on its primary node" %
                                       miss_mem, errors.ECODE_NORES)

      if be_new[constants.BE_AUTO_BALANCE]:
        for node_uuid, nres in nodeinfo.items():
          if node_uuid not in self.instance.secondary_nodes:
            continue
          nres.Raise("Can't get info from secondary node %s" %
                     self.cfg.GetNodeName(node_uuid), prereq=True,
                     ecode=errors.ECODE_STATE)
          (_, _, (nhvinfo, )) = nres.payload
          if not isinstance(nhvinfo.get("memory_free", None), int):
            raise errors.OpPrereqError("Secondary node %s didn't return free"
                                       " memory information" %
                                       self.cfg.GetNodeName(node_uuid),
                                       errors.ECODE_STATE)
          #TODO(dynmem): do the appropriate check involving MINMEM
          elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from failover to its secondary node"
                                       " %s, due to not enough memory" %
                                       self.cfg.GetNodeName(node_uuid),
                                       errors.ECODE_STATE)

    if self.op.runtime_mem:
      remote_info = self.rpc.call_instance_info(
         self.instance.primary_node, self.instance.name,
         self.instance.hypervisor,
         cluster_hvparams)
      remote_info.Raise("Error checking node %s" %
                        self.cfg.GetNodeName(self.instance.primary_node))
      if not remote_info.payload: # not running already
        raise errors.OpPrereqError("Instance %s is not running" %
                                   self.instance.name, errors.ECODE_STATE)

      current_memory = remote_info.payload["memory"]
      if (not self.op.force and
           (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
            self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
        raise errors.OpPrereqError("Instance %s must have memory between %d"
                                   " and %d MB of memory unless --force is"
                                   " given" %
                                   (self.instance.name,
                                    self.be_proposed[constants.BE_MINMEM],
                                    self.be_proposed[constants.BE_MAXMEM]),
                                   errors.ECODE_INVAL)

      delta = self.op.runtime_mem - current_memory
      if delta > 0:
        # ballooning up: the primary node must have the extra memory free
        CheckNodeFreeMemory(
            self, self.instance.primary_node,
            "ballooning memory for instance %s" % self.instance.name, delta,
            self.instance.hypervisor,
            self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])

    # make self.cluster visible in the functions below
    cluster = self.cluster

    def _PrepareNicCreate(_, params, private):
      self._PrepareNicModification(params, private, None, None,
                                   {}, cluster, pnode_uuid)
      return (None, None)

    def _PrepareNicMod(_, nic, params, private):
      self._PrepareNicModification(params, private, nic.ip, nic.network,
                                   nic.nicparams, cluster, pnode_uuid)
      return None

    def _PrepareNicRemove(_, params, __):
      ip = params.ip
      net = params.network
      if net is not None and ip is not None:
        self.cfg.ReleaseIp(net, ip, self.proc.GetECId())

    # Verify NIC changes (operating on copy)
    nics = self.instance.nics[:]
    _ApplyContainerMods("NIC", nics, None, self.nicmod,
                        _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
    if len(nics) > constants.MAX_NICS:
      raise errors.OpPrereqError("Instance has too many network interfaces"
                                 " (%d), cannot add more" % constants.MAX_NICS,
                                 errors.ECODE_STATE)

    # Pre-compute NIC changes (necessary to use result in hooks)
    self._nic_chgdesc = []
    if self.nicmod:
      # Operate on copies as this is still in prereq
      nics = [nic.Copy() for nic in self.instance.nics]
      _ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
                          self._CreateNewNic, self._ApplyNicMods,
                          self._RemoveNic)
      # Verify that NIC names are unique and valid
      utils.ValidateDeviceNames("NIC", nics)
      self._new_nics = nics
      ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
    else:
      self._new_nics = None
      ispec[constants.ISPEC_NIC_COUNT] = len(self.instance.nics)

    if not self.op.ignore_ipolicy:
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
                                                              group_info)

      # Fill ispec with backend parameters
      ispec[constants.ISPEC_SPINDLE_USE] = \
        self.be_new.get(constants.BE_SPINDLE_USE, None)
      ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS,
                                                         None)

      # Copy ispec to verify parameters with min/max values separately
      if self.op.disk_template:
        new_disk_template = self.op.disk_template
      else:
        new_disk_template = self.instance.disk_template
      ispec_max = ispec.copy()
      ispec_max[constants.ISPEC_MEM_SIZE] = \
        self.be_new.get(constants.BE_MAXMEM, None)
      res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,
                                                     new_disk_template)
      ispec_min = ispec.copy()
      ispec_min[constants.ISPEC_MEM_SIZE] = \
        self.be_new.get(constants.BE_MINMEM, None)
      res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,
                                                     new_disk_template)

      if (res_max or res_min):
        # FIXME: Improve error message by including information about whether
        # the upper or lower limit of the parameter fails the ipolicy.
        msg = ("Instance allocation to group %s (%s) violates policy: %s" %
               (group_info, group_info.name,
                utils.CommaJoin(set(res_max + res_min))))
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

  def _ConvertPlainToDrbd(self, feedback_fn):
    """Converts an instance from plain to drbd.

    Creates the DRBD data/meta volumes, renames the existing LVs into
    place as the DRBD backing devices and waits for the mirror to sync.
    On failure during DRBD creation the LV renames are rolled back.

    @type feedback_fn: callable
    @param feedback_fn: function used to report progress to the caller

    """
    feedback_fn("Converting template to drbd")
    pnode_uuid = self.instance.primary_node
    snode_uuid = self.op.remote_node_uuid

    assert self.instance.disk_template == constants.DT_PLAIN

    # create a fake disk info for _GenerateDiskTemplate
    disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
                  constants.IDISK_VG: d.logical_id[0],
                  constants.IDISK_NAME: d.name}
                 for d in self.instance.disks]
    new_disks = GenerateDiskTemplate(self, self.op.disk_template,
                                     self.instance.uuid, pnode_uuid,
                                     [snode_uuid], disk_info, None, None, 0,
                                     feedback_fn, self.diskparams)
    anno_disks = rpc.AnnotateDiskParams(new_disks, self.diskparams)
    p_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, pnode_uuid)
    s_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, snode_uuid)
    info = GetInstanceInfoText(self.instance)
    feedback_fn("Creating additional volumes...")
    # first, create the missing data and meta devices
    for disk in anno_disks:
      # unfortunately this is... not too nice
      CreateSingleBlockDev(self, pnode_uuid, self.instance, disk.children[1],
                           info, True, p_excl_stor)
      for child in disk.children:
        CreateSingleBlockDev(self, snode_uuid, self.instance, child, info, True,
                             s_excl_stor)
    # at this stage, all new LVs have been created, we can rename the
    # old ones
    feedback_fn("Renaming original volumes...")
    rename_list = [(o, n.children[0].logical_id)
                   for (o, n) in zip(self.instance.disks, new_disks)]
    result = self.rpc.call_blockdev_rename(pnode_uuid, rename_list)
    result.Raise("Failed to rename original LVs")

    feedback_fn("Initializing DRBD devices...")
    # all child devices are in place, we can now create the DRBD devices
    try:
      for disk in anno_disks:
        for (node_uuid, excl_stor) in [(pnode_uuid, p_excl_stor),
                                       (snode_uuid, s_excl_stor)]:
          f_create = node_uuid == pnode_uuid
          CreateSingleBlockDev(self, node_uuid, self.instance, disk, info,
                               f_create, excl_stor)
    except errors.GenericError as e:
      # roll back: rename the LVs back so the instance stays usable as plain
      feedback_fn("Initializing of DRBD devices failed;"
                  " renaming back original volumes...")
      rename_back_list = [(n.children[0], o.logical_id)
                          for (n, o) in zip(new_disks, self.instance.disks)]
      result = self.rpc.call_blockdev_rename(pnode_uuid, rename_back_list)
      result.Raise("Failed to rename LVs back after error %s" % str(e))
      raise

    # at this point, the instance has been modified
    self.instance.disk_template = constants.DT_DRBD8
    self.instance.disks = new_disks
    self.cfg.Update(self.instance, feedback_fn)

    # Release node locks while waiting for sync
    ReleaseLocks(self, locking.LEVEL_NODE)

    # disks are created, waiting for sync
    disk_abort = not WaitForSync(self, self.instance,
                                 oneshot=not self.op.wait_for_sync)
    if disk_abort:
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance, please cleanup manually")

    # Node resource locks will be released by caller

  def _ConvertDrbdToPlain(self, feedback_fn):
    """Converts an instance from drbd to plain.

    Keeps the DRBD data LVs as the new plain disks, returns the DRBD TCP
    ports to the pool and removes the now-unneeded secondary-node volumes
    and the metadata devices on the primary.

    @type feedback_fn: callable
    @param feedback_fn: function used to report progress to the caller

    """
    assert len(self.instance.secondary_nodes) == 1
    assert self.instance.disk_template == constants.DT_DRBD8

    pnode_uuid = self.instance.primary_node
    snode_uuid = self.instance.secondary_nodes[0]
    feedback_fn("Converting template to plain")

    old_disks = AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
    # the data LV (first child) of each DRBD disk becomes the plain disk
    new_disks = [d.children[0] for d in self.instance.disks]

    # copy over size, mode and name
    for parent, child in zip(old_disks, new_disks):
      child.size = parent.size
      child.mode = parent.mode
      child.name = parent.name

    # this is a DRBD disk, return its port to the pool
    # NOTE: this must be done right before the call to cfg.Update!
    for disk in old_disks:
      tcp_port = disk.logical_id[2]
      self.cfg.AddTcpUdpPort(tcp_port)

    # update instance structure
    self.instance.disks = new_disks
    self.instance.disk_template = constants.DT_PLAIN
    _UpdateIvNames(0, self.instance.disks)
    self.cfg.Update(self.instance, feedback_fn)

    # Release locks in case removing disks takes a while
    ReleaseLocks(self, locking.LEVEL_NODE)

    feedback_fn("Removing volumes on the secondary node...")
    for disk in old_disks:
      result = self.rpc.call_blockdev_remove(snode_uuid, (disk, self.instance))
      result.Warn("Could not remove block device %s on node %s,"
                  " continuing anyway" %
                  (disk.iv_name, self.cfg.GetNodeName(snode_uuid)),
                  self.LogWarning)

    feedback_fn("Removing unneeded volumes on the primary node...")
    for idx, disk in enumerate(old_disks):
      meta = disk.children[1]
      result = self.rpc.call_blockdev_remove(pnode_uuid, (meta, self.instance))
      result.Warn("Could not remove metadata for disk %d on node %s,"
                  " continuing anyway" %
                  (idx, self.cfg.GetNodeName(pnode_uuid)),
                  self.LogWarning)

  def _HotplugDevice(self, action, dev_type, device, extra, seq):
    """Performs a hotplug RPC for a single device, best-effort.

    A failure is logged as a warning and execution continues; the outcome
    is encoded in the returned message for the change description.

    @param action: one of the HOTPLUG_ACTION_* constants
    @param dev_type: one of the HOTPLUG_TARGET_* constants
    @param device: the device object being (un)plugged
    @param extra: extra payload passed to the RPC (e.g. a link name)
    @param seq: device index on the instance
    @rtype: string
    @return: "hotplug:done" or "hotplug:failed"

    """
    self.LogInfo("Trying to hotplug device...")
    msg = "hotplug:"
    result = self.rpc.call_hotplug_device(self.instance.primary_node,
                                          self.instance, action, dev_type,
                                          (device, self.instance),
                                          extra, seq)
    if result.fail_msg:
      self.LogWarning("Could not hotplug device: %s" % result.fail_msg)
      self.LogInfo("Continuing execution..")
      msg += "failed"
    else:
      self.LogInfo("Hotplug done.")
      msg += "done"
    return msg

  def _CreateNewDisk(self, idx, params, _):
    """Creates a new disk.

    Generates the disk object, creates (and optionally wipes) the backing
    storage, and hotplugs it when requested.

    @type idx: int
    @param idx: index at which the disk is added
    @type params: dict
    @param params: disk parameters (size, mode, ...)
    @rtype: tuple
    @return: (new disk object, list of change descriptions)

    """
    # add a new disk
    if self.instance.disk_template in constants.DTS_FILEBASED:
      # new file-based disks live in the same directory as the first disk
      (file_driver, file_path) = self.instance.disks[0].logical_id
      file_path = os.path.dirname(file_path)
    else:
      file_driver = file_path = None

    disk = \
      GenerateDiskTemplate(self, self.instance.disk_template,
                           self.instance.uuid, self.instance.primary_node,
                           self.instance.secondary_nodes, [params], file_path,
                           file_driver, idx, self.Log, self.diskparams)[0]

    new_disks = CreateDisks(self, self.instance, disks=[disk])

    if self.cluster.prealloc_wipe_disks:
      # Wipe new disk
      WipeOrCleanupDisks(self, self.instance,
                         disks=[(idx, disk, 0)],
                         cleanup=new_disks)

    changes = [
      ("disk/%d" % idx,
       "add:size=%s,mode=%s" % (disk.size, disk.mode)),
      ]
    if self.op.hotplug:
      # the disk must be assembled before it can be hotplugged
      result = self.rpc.call_blockdev_assemble(self.instance.primary_node,
                                               (disk, self.instance),
                                               self.instance.name, True, idx)
      if result.fail_msg:
        changes.append(("disk/%d" % idx, "assemble:failed"))
        self.LogWarning("Can't assemble newly created disk %d: %s",
                        idx, result.fail_msg)
      else:
        _, link_name = result.payload
        msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
                                  constants.HOTPLUG_TARGET_DISK,
                                  disk, link_name, idx)
        changes.append(("disk/%d" % idx, msg))

    return (disk, changes)

  def _PostAddDisk(self, _, disk):
    """Waits for a newly added disk to sync, then matches its activation
    state to the instance's.

    @param disk: the disk object that was just added
    @raise errors.OpExecError: if the disk fails to sync

    """
    if not WaitForSync(self, self.instance, disks=[disk],
                       oneshot=not self.op.wait_for_sync):
      raise errors.OpExecError("Failed to sync disks of %s" %
                               self.instance.name)

    # the disk is active at this point, so deactivate it if the instance disks
    # are supposed to be inactive
    if not self.instance.disks_active:
      ShutdownInstanceDisks(self, self.instance, disks=[disk])

  def _ModifyDisk(self, idx, disk, params, _):
    """Modifies a disk.

    Applies mode/name changes, and for ext-template instances also
    arbitrary provider parameters.

    @type idx: int
    @param idx: index of the disk being modified
    @param disk: the disk object to modify
    @type params: dict
    @param params: requested parameter changes
    @rtype: list
    @return: list of (field, value) change descriptions

    """
    changes = []
    if constants.IDISK_MODE in params:
      disk.mode = params.get(constants.IDISK_MODE)
      changes.append(("disk.mode/%d" % idx, disk.mode))

    if constants.IDISK_NAME in params:
      disk.name = params.get(constants.IDISK_NAME)
      changes.append(("disk.name/%d" % idx, disk.name))

    # Modify arbitrary params in case instance template is ext
    # (.items() instead of the Python2-only .iteritems(); the dict is small)
    for key, value in params.items():
      if (key not in constants.MODIFIABLE_IDISK_PARAMS and
          self.instance.disk_template == constants.DT_EXT):
        # stolen from GetUpdatedParams: default means reset/delete
        if value.lower() == constants.VALUE_DEFAULT:
          try:
            del disk.params[key]
          except KeyError:
            pass
        else:
          disk.params[key] = value
        changes.append(("disk.params:%s/%d" % (key, idx), value))

    return changes

  def _RemoveDisk(self, idx, root, _):
    """Removes a disk.

    Optionally hot-unplugs the device first, then removes the block
    devices on all nodes (best-effort) and returns any DRBD TCP port
    to the pool.

    @type idx: int
    @param idx: index of the disk being removed
    @param root: the disk object to remove
    @rtype: string
    @return: hotplug status message (empty if no hotplug was attempted)

    """
    hotmsg = ""
    if self.op.hotplug:
      hotmsg = self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
                                   constants.HOTPLUG_TARGET_DISK,
                                   root, None, idx)
      ShutdownInstanceDisks(self, self.instance, [root])

    (anno_disk,) = AnnotateDiskParams(self.instance, [root], self.cfg)
    for node_uuid, disk in anno_disk.ComputeNodeTree(
                             self.instance.primary_node):
      msg = self.rpc.call_blockdev_remove(node_uuid, (disk, self.instance)) \
              .fail_msg
      if msg:
        # removal failures are non-fatal; the admin can clean up manually
        self.LogWarning("Could not remove disk/%d on node '%s': %s,"
                        " continuing anyway", idx,
                        self.cfg.GetNodeName(node_uuid), msg)

    # if this is a DRBD disk, return its port to the pool
    if root.dev_type in constants.DTS_DRBD:
      self.cfg.AddTcpUdpPort(root.logical_id[2])

    return hotmsg

  def _CreateNewNic(self, idx, params, private):
    """Creates data structure for a new network interface.

    @type idx: int
    @param idx: index at which the NIC is added
    @type params: dict
    @param params: NIC parameters (mac, ip, network, name, ...)
    @param private: private container with the filled nicparams
    @rtype: tuple
    @return: (new NIC object, list of change descriptions)

    """
    mac = params[constants.INIC_MAC]
    ip = params.get(constants.INIC_IP, None)
    net = params.get(constants.INIC_NETWORK, None)
    name = params.get(constants.INIC_NAME, None)
    net_uuid = self.cfg.LookupNetwork(net)
    #TODO: not private.filled?? can a nic have no nicparams??
    nicparams = private.filled
    nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name,
                       nicparams=nicparams)
    nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())

    changes = [
      ("nic.%d" % idx,
       "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
       (mac, ip, private.filled[constants.NIC_MODE],
       private.filled[constants.NIC_LINK], net)),
      ]

    if self.op.hotplug:
      msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
                                constants.HOTPLUG_TARGET_NIC,
                                nobj, None, idx)
      changes.append(("nic.%d" % idx, msg))

    return (nobj, changes)

  def _ApplyNicMods(self, idx, nic, params, private):
    """Modifies a network interface.

    Applies MAC/IP/name changes, switches the NIC to another network if
    requested, replaces its nicparams with the newly filled ones and, if
    hotplug was requested, pushes the modification to the running
    instance.

    @type idx: int
    @param idx: index of the NIC being modified
    @param nic: the L{objects.NIC} to modify in place
    @param params: dict of requested NIC parameter changes
    @param private: parameter holder whose C{filled} attribute carries
        the filled nicparams (may be empty if unchanged)
    @return: list of (field, value) change descriptions

    """
    changes = []

    for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:
      if key in params:
        changes.append(("nic.%s/%d" % (key, idx), params[key]))
        setattr(nic, key, params[key])

    new_net = params.get(constants.INIC_NETWORK, nic.network)
    new_net_uuid = self.cfg.LookupNetwork(new_net)
    if new_net_uuid != nic.network:
      changes.append(("nic.network/%d" % idx, new_net))
      nic.network = new_net_uuid

    if private.filled:
      nic.nicparams = private.filled

      for (key, val) in nic.nicparams.items():
        changes.append(("nic.%s/%d" % (key, idx), val))

    if self.op.hotplug:
      msg = self._HotplugDevice(constants.HOTPLUG_ACTION_MODIFY,
                                constants.HOTPLUG_TARGET_NIC,
                                nic, None, idx)
      changes.append(("nic/%d" % idx, msg))

    return changes
  def _RemoveNic(self, idx, nic, _):
    """Removes a network interface.

    The configuration-side removal is handled by the container
    machinery; this callback only hot-unplugs the NIC from the running
    instance when hotplug was requested.

    @type idx: int
    @param idx: index of the NIC being removed
    @param nic: the L{objects.NIC} to remove
    @return: the hotplug result message, or C{None} if no hotplug

    """
    if self.op.hotplug:
      return self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
                                 constants.HOTPLUG_TARGET_NIC,
                                 nic, None, idx)
  def Exec(self, feedback_fn):
    """Modifies an instance.

    All parameters take effect only at the next restart of the instance.

    Applies, in order: primary-node change, runtime memory ballooning,
    disk modifications, disk template conversion, NIC modifications,
    hvparams/beparams/OS/osparams changes and admin-state changes, then
    writes the updated instance object back to the configuration.

    @param feedback_fn: function used to report progress and warnings
    @return: list of (parameter, new value) pairs describing the changes
    @raise errors.OpExecError: if the disks cannot be shut down before a
        disk template conversion

    """
    # Process here the warnings from CheckPrereq, as we don't have a
    # feedback_fn there.
    # TODO: Replace with self.LogWarning
    for warn in self.warn:
      feedback_fn("WARNING: %s" % warn)

    assert ((self.op.disk_template is None) ^
            bool(self.owned_locks(locking.LEVEL_NODE_RES))), \
      "Not owning any node resource locks"

    result = []

    # New primary node
    if self.op.pnode_uuid:
      self.instance.primary_node = self.op.pnode_uuid

    # runtime memory
    if self.op.runtime_mem:
      rpcres = self.rpc.call_instance_balloon_memory(self.instance.primary_node,
                                                     self.instance,
                                                     self.op.runtime_mem)
      rpcres.Raise("Cannot modify instance runtime memory")
      result.append(("runtime_memory", self.op.runtime_mem))

    # Apply disk changes
    _ApplyContainerMods("disk", self.instance.disks, result, self.diskmod,
                        self._CreateNewDisk, self._ModifyDisk,
                        self._RemoveDisk, post_add_fn=self._PostAddDisk)
    _UpdateIvNames(0, self.instance.disks)

    if self.op.disk_template:
      if __debug__:
        check_nodes = set(self.instance.all_nodes)
        if self.op.remote_node_uuid:
          check_nodes.add(self.op.remote_node_uuid)
        for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
          owned = self.owned_locks(level)
          assert not (check_nodes - owned), \
            ("Not owning the correct locks, owning %r, expected at least %r" %
             (owned, check_nodes))

      r_shut = ShutdownInstanceDisks(self, self.instance)
      if not r_shut:
        raise errors.OpExecError("Cannot shutdown instance disks, unable to"
                                 " proceed with disk template conversion")
      mode = (self.instance.disk_template, self.op.disk_template)
      try:
        self._DISK_CONVERSIONS[mode](self, feedback_fn)
      except:
        # give back any DRBD minors reserved for the conversion before
        # re-raising
        self.cfg.ReleaseDRBDMinors(self.instance.uuid)
        raise
      result.append(("disk_template", self.op.disk_template))

      assert self.instance.disk_template == self.op.disk_template, \
        ("Expected disk template '%s', found '%s'" %
         (self.op.disk_template, self.instance.disk_template))

    # Release node and resource locks if there are any (they might already have
    # been released during disk conversion)
    ReleaseLocks(self, locking.LEVEL_NODE)
    ReleaseLocks(self, locking.LEVEL_NODE_RES)

    # Apply NIC changes
    if self._new_nics is not None:
      self.instance.nics = self._new_nics
      result.extend(self._nic_chgdesc)

    # hvparams changes
    if self.op.hvparams:
      self.instance.hvparams = self.hv_inst
      for key, val in self.op.hvparams.iteritems():
        result.append(("hv/%s" % key, val))

    # beparams changes
    if self.op.beparams:
      self.instance.beparams = self.be_inst
      for key, val in self.op.beparams.iteritems():
        result.append(("be/%s" % key, val))

    # OS change
    if self.op.os_name:
      self.instance.os = self.op.os_name

    # osparams changes
    if self.op.osparams:
      self.instance.osparams = self.os_inst
      for key, val in self.op.osparams.iteritems():
        result.append(("os/%s" % key, val))

    if self.op.offline is None:
      # Ignore
      pass
    elif self.op.offline:
      # Mark instance as offline
      self.cfg.MarkInstanceOffline(self.instance.uuid)
      result.append(("admin_state", constants.ADMINST_OFFLINE))
    else:
      # Mark instance as online, but stopped
      self.cfg.MarkInstanceDown(self.instance.uuid)
      result.append(("admin_state", constants.ADMINST_DOWN))

    self.cfg.Update(self.instance, feedback_fn, self.proc.GetECId())

    assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
                self.owned_locks(locking.LEVEL_NODE)), \
      "All node locks should have been released by now"

    return result
  # Map of (source template, target template) to conversion functions;
  # only plain <-> drbd8 conversions are supported
  _DISK_CONVERSIONS = {
    (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
    (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
    }
class LUInstanceChangeGroup(LogicalUnit):
  """Logical unit for moving an instance to another node group.

  Uses an instance allocator to compute the evacuation out of the
  instance's current group(s) into the requested (or all other) target
  groups, and returns the jobs that will perform the moves.

  """
  HPATH = "instance-change-group"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self.share_locks = ShareAll()

    self.needed_locks = {
      locking.LEVEL_NODEGROUP: [],
      locking.LEVEL_NODE: [],
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
      }

    self._ExpandAndLockInstance()

    if self.op.target_groups:
      self.req_target_uuids = map(self.cfg.LookupNodeGroup,
                                  self.op.target_groups)
    else:
      self.req_target_uuids = None

    self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]

      if self.req_target_uuids:
        lock_groups = set(self.req_target_uuids)

        # Lock all groups used by instance optimistically; this requires going
        # via the node before it's locked, requiring verification later on
        instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
        lock_groups.update(instance_groups)
      else:
        # No target groups, need to lock all of them
        lock_groups = locking.ALL_SET

      self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups

    elif level == locking.LEVEL_NODE:
      if self.req_target_uuids:
        # Lock all nodes used by instances
        self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
        self._LockInstancesNodes()

        # Lock all nodes in all potential target groups
        lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
                       self.cfg.GetInstanceNodeGroups(self.op.instance_uuid))
        member_nodes = [node_uuid
                        for group in lock_groups
                        for node_uuid in self.cfg.GetNodeGroup(group).members]
        self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
      else:
        # Lock all nodes as all groups are potential targets
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    """Check prerequisites.

    Verifies the locks acquired optimistically in L{DeclareLocks} are
    still correct and computes the set of valid target group UUIDs.

    @raise errors.OpPrereqError: if a requested target group is already
        used by the instance, or no target group remains

    """
    owned_instance_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))

    assert (self.req_target_uuids is None or
            owned_groups.issuperset(self.req_target_uuids))
    assert owned_instance_names == set([self.op.instance_name])

    # Get instance information
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)

    # Check if node groups for locked instance are still correct
    assert owned_nodes.issuperset(self.instance.all_nodes), \
      ("Instance %s's nodes changed while we kept the lock" %
       self.op.instance_name)

    inst_groups = CheckInstanceNodeGroups(self.cfg, self.op.instance_uuid,
                                          owned_groups)

    if self.req_target_uuids:
      # User requested specific target groups
      self.target_uuids = frozenset(self.req_target_uuids)
    else:
      # All groups except those used by the instance are potential targets
      self.target_uuids = owned_groups - inst_groups

    conflicting_groups = self.target_uuids & inst_groups
    if conflicting_groups:
      raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
                                 " used by the instance '%s'" %
                                 (utils.CommaJoin(conflicting_groups),
                                  self.op.instance_name),
                                 errors.ECODE_INVAL)

    if not self.target_uuids:
      raise errors.OpPrereqError("There are no possible target groups",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    assert self.target_uuids

    env = {
      "TARGET_GROUPS": " ".join(self.target_uuids),
      }

    env.update(BuildInstanceHookEnvByObject(self, self.instance))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()
    return ([mn], [mn])

  def Exec(self, feedback_fn):
    """Run the group-change computation via the instance allocator.

    @return: a L{ResultWithJobs} wrapping the jobs that will move the
        instance to its new group
    @raise errors.OpPrereqError: if the allocator cannot compute a
        solution

    """
    instances = list(self.owned_locks(locking.LEVEL_INSTANCE))

    assert instances == [self.op.instance_name], "Instance not locked"

    req = iallocator.IAReqGroupChange(instances=instances,
                                      target_groups=list(self.target_uuids))
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute solution for changing group of"
                                 " instance '%s' using iallocator '%s': %s" %
                                 (self.op.instance_name, self.op.iallocator,
                                  ial.info), errors.ECODE_NORES)

    jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False)

    self.LogInfo("Iallocator returned %s job(s) for changing group of"
                 " instance '%s'", len(jobs), self.op.instance_name)

    return ResultWithJobs(jobs)