#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Logical units dealing with instances."""

import OpenSSL
import copy
import logging
import os

from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import ht
from ganeti import hypervisor
from ganeti import locking
from ganeti.masterd import iallocator
from ganeti import masterd
from ganeti import netutils
from ganeti import objects
from ganeti import pathutils
import ganeti.rpc.node as rpc
from ganeti import utils

from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs

from ganeti.cmdlib.common import INSTANCE_DOWN, \
  INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, CheckNodeOnline, \
  ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
  LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
  IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
  AnnotateDiskParams, GetUpdatedParams, ExpandInstanceUuidAndName, \
  ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeUuidAndName, \
  CheckDiskTemplateEnabled, IsValidDiskAccessModeCombination
from ganeti.cmdlib.instance_storage import CreateDisks, \
  CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, WaitForSync, \
  IsExclusiveStorageEnabledNodeUuid, CreateSingleBlockDev, ComputeDisks, \
  CheckRADOSFreeSpace, ComputeDiskSizePerVG, GenerateDiskTemplate, \
  StartInstanceDisks, ShutdownInstanceDisks, AssembleInstanceDisks, \
  CheckSpindlesExclusiveStorage
from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
  GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
  NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
  ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
  GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
  CheckInstanceBridgesExist, CheckNicsBridgesExist, CheckNodeHasOS

import ganeti.masterd.instance


#: Type description for changes as returned by L{_ApplyContainerMods}'s
#: callbacks
_TApplyContModsCbChanges = \
  ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
    ht.TNonEmptyString,
    ht.TAny,
    ])))
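# For illustration: a conforming value is either None or a list of 2-item
# sequences whose first element is a non-empty string, e.g.
# [("nic.0", "details of the change")] (the example pair is made up).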


def _CheckHostnameSane(lu, name):
  """Ensures that a given hostname resolves to a 'sane' name.

  The given name is required to be a prefix of the resolved hostname,
  to prevent accidental mismatches.

  @param lu: the logical unit on behalf of which we're checking
  @param name: the name we should resolve and check
  @return: the resolved hostname object

  """
  hostname = netutils.GetHostname(name=name)
  if hostname.name != name:
    lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name)
  if not utils.MatchNameComponent(name, [hostname.name]):
    raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
                                " same as given hostname '%s'") %
                               (hostname.name, name), errors.ECODE_INVAL)
  return hostname
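# For illustration (hypothetical names): a request for "inst1" that resolves
# to "inst1.example.com" passes this check, while one that resolves to
# "other.example.com" raises OpPrereqError.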


def _CheckOpportunisticLocking(op):
  """Generate error if opportunistic locking is not possible.

  """
  if op.opportunistic_locking and not op.iallocator:
    raise errors.OpPrereqError("Opportunistic locking is only available in"
                               " combination with an instance allocator",
                               errors.ECODE_INVAL)


def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_name_whitelist):
  """Wrapper around IAReqInstanceAlloc.

  @param op: The instance opcode
  @param disks: The computed disks
  @param nics: The computed nics
  @param beparams: The fully filled beparams
  @param node_name_whitelist: List of nodes which should appear as online to the
    allocator (unless the node is already marked offline)

  @returns: A filled L{iallocator.IAReqInstanceAlloc}

  """
  spindle_use = beparams[constants.BE_SPINDLE_USE]
  return iallocator.IAReqInstanceAlloc(name=op.instance_name,
                                       disk_template=op.disk_template,
                                       tags=op.tags,
                                       os=op.os_type,
                                       vcpus=beparams[constants.BE_VCPUS],
                                       memory=beparams[constants.BE_MAXMEM],
                                       spindle_use=spindle_use,
                                       disks=disks,
                                       nics=[n.ToDict() for n in nics],
                                       hypervisor=op.hypervisor,
                                       node_whitelist=node_name_whitelist)


def _ComputeFullBeParams(op, cluster):
  """Computes the full beparams.

  @param op: The instance opcode
  @param cluster: The cluster config object

  @return: The fully filled beparams

  """
  default_beparams = cluster.beparams[constants.PP_DEFAULT]
  for param, value in op.beparams.iteritems():
    if value == constants.VALUE_AUTO:
      op.beparams[param] = default_beparams[param]
  objects.UpgradeBeParams(op.beparams)
  utils.ForceDictType(op.beparams, constants.BES_PARAMETER_TYPES)
  return cluster.SimpleFillBE(op.beparams)
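# For illustration (made-up values): with op.beparams == {"vcpus": "auto"},
# the "auto" value is first replaced by the cluster-wide default before the
# dict is type-checked and merged into the full backend parameter set.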


def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
  """Computes the nics.

  @param op: The instance opcode
  @param cluster: Cluster configuration object
  @param default_ip: The default ip to assign
  @param cfg: An instance of the configuration object
  @param ec_id: Execution context ID

  @returns: The built-up nics

  """
  nics = []
  for nic in op.nics:
    nic_mode_req = nic.get(constants.INIC_MODE, None)
    nic_mode = nic_mode_req
    if nic_mode is None or nic_mode == constants.VALUE_AUTO:
      nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]

    net = nic.get(constants.INIC_NETWORK, None)
    link = nic.get(constants.NIC_LINK, None)
    ip = nic.get(constants.INIC_IP, None)
    vlan = nic.get(constants.INIC_VLAN, None)

    if net is None or net.lower() == constants.VALUE_NONE:
      net = None
    else:
      if nic_mode_req is not None or link is not None:
        raise errors.OpPrereqError("If network is given, no mode or link"
                                   " is allowed to be passed",
                                   errors.ECODE_INVAL)

    # ip validity checks
    if ip is None or ip.lower() == constants.VALUE_NONE:
      nic_ip = None
    elif ip.lower() == constants.VALUE_AUTO:
      if not op.name_check:
        raise errors.OpPrereqError("IP address set to auto but name checks"
                                   " have been skipped",
                                   errors.ECODE_INVAL)
      nic_ip = default_ip
    else:
      # We defer pool operations until later, so that the iallocator has
      # filled in the instance's node(s)
      if ip.lower() == constants.NIC_IP_POOL:
        if net is None:
          raise errors.OpPrereqError("if ip=pool, parameter network"
                                     " must be passed too",
                                     errors.ECODE_INVAL)

      elif not netutils.IPAddress.IsValid(ip):
        raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                   errors.ECODE_INVAL)

      nic_ip = ip

    # TODO: check the ip address for uniqueness
    if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
      raise errors.OpPrereqError("Routed nic mode requires an ip address",
                                 errors.ECODE_INVAL)

    # MAC address verification
    mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
    if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
      mac = utils.NormalizeAndValidateMac(mac)

      try:
        # TODO: We need to factor this out
        cfg.ReserveMAC(mac, ec_id)
      except errors.ReservationError:
        raise errors.OpPrereqError("MAC address %s already in use"
                                   " in cluster" % mac,
                                   errors.ECODE_NOTUNIQUE)

    # Build nic parameters
    nicparams = {}
    if nic_mode_req:
      nicparams[constants.NIC_MODE] = nic_mode
    if link:
      nicparams[constants.NIC_LINK] = link
    if vlan:
      nicparams[constants.NIC_VLAN] = vlan

    check_params = cluster.SimpleFillNIC(nicparams)
    objects.NIC.CheckParameterSyntax(check_params)
    net_uuid = cfg.LookupNetwork(net)
    name = nic.get(constants.INIC_NAME, None)
    if name is not None and name.lower() == constants.VALUE_NONE:
      name = None
    nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name,
                          network=net_uuid, nicparams=nicparams)
    nic_obj.uuid = cfg.GenerateUniqueID(ec_id)
    nics.append(nic_obj)

  return nics
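# Note: the INIC_IP values handled above are None/"none" (no address),
# "auto" (use the default_ip derived from the name check), "pool" (defer
# allocation to the network's IP pool once the node is known), or a literal
# IP address, which must be valid.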


def _CheckForConflictingIp(lu, ip, node_uuid):
  """Raise an error in case of a conflicting IP address.

  @type ip: string
  @param ip: IP address
  @type node_uuid: string
  @param node_uuid: node UUID

  """
  (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node_uuid)
  if conf_net is not None:
    raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
                                " network %s, but the target NIC does not." %
                                (ip, conf_net)),
                               errors.ECODE_STATE)

  return (None, None)


def _ComputeIPolicyInstanceSpecViolation(
  ipolicy, instance_spec, disk_template,
  _compute_fn=ComputeIPolicySpecViolation):
  """Compute if instance specs meet the specs of ipolicy.

  @type ipolicy: dict
  @param ipolicy: The ipolicy to verify against
  @type instance_spec: dict
  @param instance_spec: The instance spec to verify
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{ComputeIPolicySpecViolation}

  """
  mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
  cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
  disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
  disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
  nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
  spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)

  return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
                     disk_sizes, spindle_use, disk_template)
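# For illustration (made-up numbers): an instance_spec such as
# {constants.ISPEC_MEM_SIZE: 512, constants.ISPEC_CPU_COUNT: 1,
#  constants.ISPEC_DISK_COUNT: 1, constants.ISPEC_DISK_SIZE: [1024],
#  constants.ISPEC_NIC_COUNT: 1, constants.ISPEC_SPINDLE_USE: 1}
# is unpacked into the individual arguments of ComputeIPolicySpecViolation,
# whose result is a (possibly empty) list of policy violations.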


def _CheckOSVariant(os_obj, name):
  """Check whether an OS name conforms to the os variants specification.

  @type os_obj: L{objects.OS}
  @param os_obj: OS object to check
  @type name: string
  @param name: OS name passed by the user, to check for validity

  """
  variant = objects.OS.GetVariant(name)
  if not os_obj.supported_variants:
    if variant:
      raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
                                 " passed)" % (os_obj.name, variant),
                                 errors.ECODE_INVAL)
    return
  if not variant:
    raise errors.OpPrereqError("OS name must include a variant",
                               errors.ECODE_INVAL)

  if variant not in os_obj.supported_variants:
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)


class LUInstanceCreate(LogicalUnit):
  """Create an instance.

  """
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def _CheckDiskTemplateValid(self):
    """Checks validity of disk template.

    """
    cluster = self.cfg.GetClusterInfo()
    if self.op.disk_template is None:
      # FIXME: It would be better to take the default disk template from the
      # ipolicy, but for the ipolicy we need the primary node, which we get from
      # the iallocator, which wants the disk template as input. To solve this
      # chicken-and-egg problem, it should be possible to specify just a node
      # group from the iallocator and take the ipolicy from that.
      self.op.disk_template = cluster.enabled_disk_templates[0]
    CheckDiskTemplateEnabled(cluster, self.op.disk_template)

  def _CheckDiskArguments(self):
    """Checks validity of disk-related arguments.

    """
    # check that disk's names are unique and valid
    utils.ValidateDeviceNames("disk", self.op.disks)

    self._CheckDiskTemplateValid()

    # check disks: parameter names and consistent adopt/no-adopt strategy
    has_adopt = has_no_adopt = False
    for disk in self.op.disks:
      if self.op.disk_template != constants.DT_EXT:
        utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
      if constants.IDISK_ADOPT in disk:
        has_adopt = True
      else:
        has_no_adopt = True
    if has_adopt and has_no_adopt:
      raise errors.OpPrereqError("Either all disks are adopted or none is",
                                 errors.ECODE_INVAL)
    if has_adopt:
      if self.op.disk_template not in constants.DTS_MAY_ADOPT:
        raise errors.OpPrereqError("Disk adoption is not supported for the"
                                   " '%s' disk template" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)
      if self.op.iallocator is not None:
        raise errors.OpPrereqError("Disk adoption not allowed with an"
                                   " iallocator script", errors.ECODE_INVAL)
      if self.op.mode == constants.INSTANCE_IMPORT:
        raise errors.OpPrereqError("Disk adoption not allowed for"
                                   " instance import", errors.ECODE_INVAL)
    else:
      if self.op.disk_template in constants.DTS_MUST_ADOPT:
        raise errors.OpPrereqError("Disk template %s requires disk adoption,"
                                   " but no 'adopt' parameter given" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)

    self.adopt_disks = has_adopt

  def _CheckVLANArguments(self):
    """Check validity of VLANs, if given.

    """
    for nic in self.op.nics:
      vlan = nic.get(constants.INIC_VLAN, None)
      if vlan:
        if vlan[0] == ".":
          # vlan starting with dot means single untagged vlan,
          # might be followed by trunk (:)
          if not vlan[1:].isdigit():
            vlanlist = vlan[1:].split(':')
            for vl in vlanlist:
              if not vl.isdigit():
                raise errors.OpPrereqError("Specified VLAN parameter is "
                                           "invalid : %s" % vlan,
                                           errors.ECODE_INVAL)
        elif vlan[0] == ":":
          # Trunk - tagged only
          vlanlist = vlan[1:].split(':')
          for vl in vlanlist:
            if not vl.isdigit():
              raise errors.OpPrereqError("Specified VLAN parameter is invalid"
                                         " : %s" % vlan, errors.ECODE_INVAL)
        elif vlan.isdigit():
          # This is the simplest case. No dots, only single digit
          # -> Create untagged access port, dot needs to be added
          nic[constants.INIC_VLAN] = "." + vlan
        else:
          raise errors.OpPrereqError("Specified VLAN parameter is invalid"
                                     " : %s" % vlan, errors.ECODE_INVAL)

  def CheckArguments(self):
    """Check arguments.

    """
    # do not require name_check to ease forward/backward compatibility
    # for tools
    if self.op.no_install and self.op.start:
      self.LogInfo("No-installation mode selected, disabling startup")
      self.op.start = False
    # validate/normalize the instance name
    self.op.instance_name = \
      netutils.Hostname.GetNormalizedName(self.op.instance_name)

    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("Cannot do IP address check without a name"
                                 " check", errors.ECODE_INVAL)

    # check nics' parameter names
    for nic in self.op.nics:
      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
    # check that NIC's parameters names are unique and valid
    utils.ValidateDeviceNames("NIC", self.op.nics)

    self._CheckVLANArguments()

    self._CheckDiskArguments()
    assert self.op.disk_template is not None

    # instance name verification
    if self.op.name_check:
      self.hostname = _CheckHostnameSane(self, self.op.instance_name)
      self.op.instance_name = self.hostname.name
      # used in CheckPrereq for ip ping check
      self.check_ip = self.hostname.ip
    else:
      self.check_ip = None

    # file storage checks
    if (self.op.file_driver and
        not self.op.file_driver in constants.FILE_DRIVER):
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                 self.op.file_driver, errors.ECODE_INVAL)

    # set default file_driver if unset and required
    if (not self.op.file_driver and
        self.op.disk_template in constants.DTS_FILEBASED):
      self.op.file_driver = constants.FD_LOOP

    ### Node/iallocator related checks
    CheckIAllocatorOrNode(self, "iallocator", "pnode")

    if self.op.pnode is not None:
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        if self.op.snode is None:
          raise errors.OpPrereqError("The networked disk templates need"
                                     " a mirror node", errors.ECODE_INVAL)
      elif self.op.snode:
        self.LogWarning("Secondary node will be ignored on non-mirrored disk"
                        " template")
        self.op.snode = None

    _CheckOpportunisticLocking(self.op)

    if self.op.mode == constants.INSTANCE_IMPORT:
      # On import force_variant must be True, because if we forced it at
      # initial install, our only chance when importing it back is that it
      # works again!
      self.op.force_variant = True

      if self.op.no_install:
        self.LogInfo("No-installation mode has no effect during import")

    elif self.op.mode == constants.INSTANCE_CREATE:
      if self.op.os_type is None:
        raise errors.OpPrereqError("No guest OS specified",
                                   errors.ECODE_INVAL)
      if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
        raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
                                   " installation" % self.op.os_type,
                                   errors.ECODE_STATE)
    elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
      self._cds = GetClusterDomainSecret()

      # Check handshake to ensure both clusters have the same domain secret
      src_handshake = self.op.source_handshake
      if not src_handshake:
        raise errors.OpPrereqError("Missing source handshake",
                                   errors.ECODE_INVAL)

      errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
                                                           src_handshake)
      if errmsg:
        raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
                                   errors.ECODE_INVAL)

      # Load and check source CA
      self.source_x509_ca_pem = self.op.source_x509_ca
      if not self.source_x509_ca_pem:
        raise errors.OpPrereqError("Missing source X509 CA",
                                   errors.ECODE_INVAL)

      try:
        (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
                                                    self._cds)
      except OpenSSL.crypto.Error, err:
        raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
                                   (err, ), errors.ECODE_INVAL)

      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
      if errcode is not None:
        raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
                                   errors.ECODE_INVAL)

      self.source_x509_ca = cert

      src_instance_name = self.op.source_instance_name
      if not src_instance_name:
        raise errors.OpPrereqError("Missing source instance name",
                                   errors.ECODE_INVAL)

      self.source_instance_name = \
        netutils.GetHostname(name=src_instance_name).name

    else:
      raise errors.OpPrereqError("Invalid instance creation mode %r" %
                                 self.op.mode, errors.ECODE_INVAL)

  def ExpandNames(self):
    """ExpandNames for CreateInstance.

    Figure out the right locks for instance creation.

    """
    self.needed_locks = {}

    # this is just a preventive check, but someone might still add this
    # instance in the meantime, and creation will fail at lock-add time
    if self.op.instance_name in\
      [inst.name for inst in self.cfg.GetAllInstancesInfo().values()]:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 self.op.instance_name, errors.ECODE_EXISTS)

    self.add_locks[locking.LEVEL_INSTANCE] = self.op.instance_name

    if self.op.iallocator:
      # TODO: Find a solution to not lock all nodes in the cluster, e.g. by
      # specifying a group on instance creation and then selecting nodes from
      # that group
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
      self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET

      if self.op.opportunistic_locking:
        self.opportunistic_locks[locking.LEVEL_NODE] = True
    else:
      (self.op.pnode_uuid, self.op.pnode) = \
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
      nodelist = [self.op.pnode_uuid]
      if self.op.snode is not None:
        (self.op.snode_uuid, self.op.snode) = \
          ExpandNodeUuidAndName(self.cfg, self.op.snode_uuid, self.op.snode)
        nodelist.append(self.op.snode_uuid)
      self.needed_locks[locking.LEVEL_NODE] = nodelist

    # in case of import lock the source node too
    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = self.op.src_node
      src_path = self.op.src_path

      if src_path is None:
        self.op.src_path = src_path = self.op.instance_name

      if src_node is None:
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
        self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
        self.op.src_node = None
        if os.path.isabs(src_path):
          raise errors.OpPrereqError("Importing an instance from a path"
                                     " requires a source node option",
                                     errors.ECODE_INVAL)
      else:
        (self.op.src_node_uuid, self.op.src_node) = (_, src_node) = \
          ExpandNodeUuidAndName(self.cfg, self.op.src_node_uuid, src_node)
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
          self.needed_locks[locking.LEVEL_NODE].append(self.op.src_node_uuid)
        if not os.path.isabs(src_path):
          self.op.src_path = \
            utils.PathJoin(pathutils.EXPORT_DIR, src_path)

    self.needed_locks[locking.LEVEL_NODE_RES] = \
      CopyLockList(self.needed_locks[locking.LEVEL_NODE])

    # Optimistically acquire shared group locks (we're reading the
    # configuration).  We can't just call GetInstanceNodeGroups, because the
    # instance doesn't exist yet. Therefore we lock all node groups of all
    # nodes we have.
    if self.needed_locks[locking.LEVEL_NODE] == locking.ALL_SET:
      # In the case we lock all nodes for opportunistic allocation, we have no
      # choice than to lock all groups, because they're allocated before nodes.
      # This is sad, but true. At least we release all those we don't need in
      # CheckPrereq later.
      self.needed_locks[locking.LEVEL_NODEGROUP] = locking.ALL_SET
    else:
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
        list(self.cfg.GetNodeGroupsFromNodes(
          self.needed_locks[locking.LEVEL_NODE]))
    self.share_locks[locking.LEVEL_NODEGROUP] = 1

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE_RES and \
      self.opportunistic_locks[locking.LEVEL_NODE]:
      # Even when using opportunistic locking, we require the same set of
      # NODE_RES locks as we got NODE locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        self.owned_locks(locking.LEVEL_NODE)

  def _RunAllocator(self):
    """Run the allocator based on input opcode.

    """
    if self.op.opportunistic_locking:
      # Only consider nodes for which a lock is held
      node_name_whitelist = self.cfg.GetNodeNames(
        self.owned_locks(locking.LEVEL_NODE))
    else:
      node_name_whitelist = None

    req = _CreateInstanceAllocRequest(self.op, self.disks,
                                      self.nics, self.be_full,
                                      node_name_whitelist)
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      # When opportunistic locks are used only a temporary failure is generated
      if self.op.opportunistic_locking:
        ecode = errors.ECODE_TEMP_NORES
      else:
        ecode = errors.ECODE_NORES

      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" %
                                 (self.op.iallocator, ial.info),
                                 ecode)

    (self.op.pnode_uuid, self.op.pnode) = \
      ExpandNodeUuidAndName(self.cfg, None, ial.result[0])
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                 self.op.instance_name, self.op.iallocator,
                 utils.CommaJoin(ial.result))

    assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator"

    if req.RequiredNodes() == 2:
      (self.op.snode_uuid, self.op.snode) = \
        ExpandNodeUuidAndName(self.cfg, None, ial.result[1])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "ADD_MODE": self.op.mode,
      }
    if self.op.mode == constants.INSTANCE_IMPORT:
      env["SRC_NODE"] = self.op.src_node
      env["SRC_PATH"] = self.op.src_path
      env["SRC_IMAGES"] = self.src_images

    env.update(BuildInstanceHookEnv(
      name=self.op.instance_name,
      primary_node_name=self.op.pnode,
      secondary_node_names=self.cfg.GetNodeNames(self.secondaries),
      status=self.op.start,
      os_type=self.op.os_type,
      minmem=self.be_full[constants.BE_MINMEM],
      maxmem=self.be_full[constants.BE_MAXMEM],
      vcpus=self.be_full[constants.BE_VCPUS],
      nics=NICListToTuple(self, self.nics),
      disk_template=self.op.disk_template,
      disks=[(d[constants.IDISK_NAME], d.get("uuid", ""),
              d[constants.IDISK_SIZE], d[constants.IDISK_MODE])
             for d in self.disks],
      bep=self.be_full,
      hvp=self.hv_full,
      hypervisor_name=self.op.hypervisor,
      tags=self.op.tags,
      ))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode(), self.op.pnode_uuid] + self.secondaries
    return nl, nl

  def _ReadExportInfo(self):
    """Reads the export information from disk.

    It will override the opcode source node and path with the actual
    information, if these two were not specified before.

    @return: the export information

    """
    assert self.op.mode == constants.INSTANCE_IMPORT

    if self.op.src_node_uuid is None:
      locked_nodes = self.owned_locks(locking.LEVEL_NODE)
      exp_list = self.rpc.call_export_list(locked_nodes)
      found = False
      for node_uuid in exp_list:
        if exp_list[node_uuid].fail_msg:
          continue
        if self.op.src_path in exp_list[node_uuid].payload:
          found = True
          self.op.src_node = self.cfg.GetNodeInfo(node_uuid).name
          self.op.src_node_uuid = node_uuid
          self.op.src_path = utils.PathJoin(pathutils.EXPORT_DIR,
                                            self.op.src_path)
          break
      if not found:
        raise errors.OpPrereqError("No export found for relative path %s" %
                                   self.op.src_path, errors.ECODE_INVAL)

    CheckNodeOnline(self, self.op.src_node_uuid)
    result = self.rpc.call_export_info(self.op.src_node_uuid, self.op.src_path)
    result.Raise("No export or invalid export found in dir %s" %
                 self.op.src_path)

    export_info = objects.SerializableConfigParser.Loads(str(result.payload))
    if not export_info.has_section(constants.INISECT_EXP):
      raise errors.ProgrammerError("Corrupted export config",
                                   errors.ECODE_ENVIRON)

    ei_version = export_info.get(constants.INISECT_EXP, "version")
    if int(ei_version) != constants.EXPORT_VERSION:
      raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                 (ei_version, constants.EXPORT_VERSION),
                                 errors.ECODE_ENVIRON)
    return export_info

  def _ReadExportParams(self, einfo):
    """Use export parameters as defaults.

    In case the opcode doesn't specify (as in override) some instance
    parameters, then try to use them from the export information, if
    that declares them.

    """
    self.op.os_type = einfo.get(constants.INISECT_EXP, "os")

    if not self.op.disks:
      disks = []
      # TODO: import the disk iv_name too
      for idx in range(constants.MAX_DISKS):
        if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
          disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
          disks.append({constants.IDISK_SIZE: disk_sz})
      self.op.disks = disks
      if not disks and self.op.disk_template != constants.DT_DISKLESS:
        raise errors.OpPrereqError("No disk info specified and the export"
                                   " is missing the disk information",
                                   errors.ECODE_INVAL)

    if not self.op.nics:
      nics = []
      for idx in range(constants.MAX_NICS):
        if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
          ndict = {}
          for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
            nic_param_name = "nic%d_%s" % (idx, name)
            if einfo.has_option(constants.INISECT_INS, nic_param_name):
              v = einfo.get(constants.INISECT_INS, nic_param_name)
              ndict[name] = v
          nics.append(ndict)
        else:
          break
      self.op.nics = nics

    if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
      self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()

    if (self.op.hypervisor is None and
        einfo.has_option(constants.INISECT_INS, "hypervisor")):
      self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")

    if einfo.has_section(constants.INISECT_HYP):
      # use the export parameters but do not override the ones
      # specified by the user
      for name, value in einfo.items(constants.INISECT_HYP):
        if name not in self.op.hvparams:
          self.op.hvparams[name] = value

    if einfo.has_section(constants.INISECT_BEP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_BEP):
        if name not in self.op.beparams:
          self.op.beparams[name] = value
        # Compatibility for the old "memory" be param
        if name == constants.BE_MEMORY:
          if constants.BE_MAXMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MAXMEM] = value
          if constants.BE_MINMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MINMEM] = value
    else:
      # try to read the parameters old style, from the main section
      for name in constants.BES_PARAMETERS:
        if (name not in self.op.beparams and
            einfo.has_option(constants.INISECT_INS, name)):
          self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)

    if einfo.has_section(constants.INISECT_OSP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_OSP):
        if name not in self.op.osparams:
          self.op.osparams[name] = value

  def _RevertToDefaults(self, cluster):
    """Revert the instance parameters to the default values.

    """
    # hvparams
    hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
    for name in self.op.hvparams.keys():
      if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
        del self.op.hvparams[name]
    # beparams
    be_defs = cluster.SimpleFillBE({})
    for name in self.op.beparams.keys():
      if name in be_defs and be_defs[name] == self.op.beparams[name]:
        del self.op.beparams[name]
    # nic params
    nic_defs = cluster.SimpleFillNIC({})
    for nic in self.op.nics:
      for name in constants.NICS_PARAMETERS:
        if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
          del nic[name]
    # osparams
    os_defs = cluster.SimpleFillOS(self.op.os_type, {})
    for name in self.op.osparams.keys():
      if name in os_defs and os_defs[name] == self.op.osparams[name]:
        del self.op.osparams[name]

  def _CalculateFileStorageDir(self):
    """Calculate final instance file storage dir.

    """
    # file storage dir calculation/check
    self.instance_file_storage_dir = None
    if self.op.disk_template in constants.DTS_FILEBASED:
      # build the full file storage dir path
      joinargs = []

      cfg_storage = None
      if self.op.disk_template == constants.DT_FILE:
        cfg_storage = self.cfg.GetFileStorageDir()
      elif self.op.disk_template == constants.DT_SHARED_FILE:
        cfg_storage = self.cfg.GetSharedFileStorageDir()
      elif self.op.disk_template == constants.DT_GLUSTER:
        cfg_storage = self.cfg.GetGlusterStorageDir()

      if not cfg_storage:
        raise errors.OpPrereqError(
          "Cluster file storage dir for {tpl} storage type not defined".format(
            tpl=repr(self.op.disk_template)
          ),
          errors.ECODE_STATE
        )

      joinargs.append(cfg_storage)

      if self.op.file_storage_dir is not None:
        joinargs.append(self.op.file_storage_dir)

      if self.op.disk_template != constants.DT_GLUSTER:
        joinargs.append(self.op.instance_name)

      if len(joinargs) > 1:
        # pylint: disable=W0142
        self.instance_file_storage_dir = utils.PathJoin(*joinargs)
      else:
        self.instance_file_storage_dir = joinargs[0]

  def CheckPrereq(self): # pylint: disable=R0914
    """Check prerequisites.

    """
    # Check that the optimistically acquired groups are correct wrt the
    # acquired nodes
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
    cur_groups = list(self.cfg.GetNodeGroupsFromNodes(owned_nodes))
    if not owned_groups.issuperset(cur_groups):
      raise errors.OpPrereqError("New instance %s's node groups changed since"
                                 " locks were acquired, current groups are"
                                 " '%s', owning groups '%s'; retry the"
                                 " operation" %
                                 (self.op.instance_name,
                                  utils.CommaJoin(cur_groups),
                                  utils.CommaJoin(owned_groups)),
                                 errors.ECODE_STATE)

    self._CalculateFileStorageDir()

    if self.op.mode == constants.INSTANCE_IMPORT:
      export_info = self._ReadExportInfo()
      self._ReadExportParams(export_info)
      self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
    else:
      self._old_instance_name = None

    if (not self.cfg.GetVGName() and
        self.op.disk_template not in constants.DTS_NOT_LVM):
      raise errors.OpPrereqError("Cluster does not support lvm-based"
                                 " instances", errors.ECODE_STATE)

    if (self.op.hypervisor is None or
        self.op.hypervisor == constants.VALUE_AUTO):
      self.op.hypervisor = self.cfg.GetHypervisorType()

    cluster = self.cfg.GetClusterInfo()
    enabled_hvs = cluster.enabled_hypervisors
    if self.op.hypervisor not in enabled_hvs:
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
                                 " cluster (%s)" %
                                 (self.op.hypervisor, ",".join(enabled_hvs)),
                                 errors.ECODE_STATE)

    # Check tag validity
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)

    # check hypervisor parameter syntax (locally)
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
    filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
                                      self.op.hvparams)
    hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
    hv_type.CheckParameterSyntax(filled_hvp)
    self.hv_full = filled_hvp
    # check that we don't specify global parameters on an instance
    CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
                         "instance", "cluster")

    # fill and remember the beparams dict
    self.be_full = _ComputeFullBeParams(self.op, cluster)

    # build os parameters
    self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)

    # now that hvp/bep are in final format, let's reset to defaults,
    # if told to do so
    if self.op.identify_defaults:
      self._RevertToDefaults(cluster)

    # NIC buildup
    self.nics = _ComputeNics(self.op, cluster, self.check_ip, self.cfg,
                             self.proc.GetECId())

    # disk checks/pre-build
    default_vg = self.cfg.GetVGName()
    self.disks = ComputeDisks(self.op, default_vg)

    if self.op.mode == constants.INSTANCE_IMPORT:
      disk_images = []
      for idx in range(len(self.disks)):
        option = "disk%d_dump" % idx
        if export_info.has_option(constants.INISECT_INS, option):
          # FIXME: are the old os-es, disk sizes, etc. useful?
          export_name = export_info.get(constants.INISECT_INS, option)
          image = utils.PathJoin(self.op.src_path, export_name)
          disk_images.append(image)
        else:
          disk_images.append(False)

      self.src_images = disk_images

      if self.op.instance_name == self._old_instance_name:
        for idx, nic in enumerate(self.nics):
          if nic.mac == constants.VALUE_AUTO:
            nic_mac_ini = "nic%d_mac" % idx
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)

    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT

    # ip ping checks (we use the same ip that was resolved in ExpandNames)
    if self.op.ip_check:
      if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (self.check_ip, self.op.instance_name),
                                   errors.ECODE_NOTUNIQUE)

    #### mac address generation
    # By generating here the mac address both the allocator and the hooks get
    # the real final mac address rather than the 'auto' or 'generate' value.
    # There is a race condition between the generation and the instance object
    # creation, which means that we know the mac is valid now, but we're not
    # sure it will be when we actually add the instance. If things go bad
    # adding the instance will abort because of a duplicate mac, and the
    # creation job will fail.
    for nic in self.nics:
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId())

    #### allocator run

    if self.op.iallocator is not None:
      self._RunAllocator()

    # Release all unneeded node locks
    keep_locks = filter(None, [self.op.pnode_uuid, self.op.snode_uuid,
                               self.op.src_node_uuid])
    ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)
    # Release all unneeded group locks
    ReleaseLocks(self, locking.LEVEL_NODEGROUP,
                 keep=self.cfg.GetNodeGroupsFromNodes(keep_locks))

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES)), \
      "Node locks differ from node resource locks"

    #### node related checks

    # check primary node
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode_uuid)
    assert self.pnode is not None, \
      "Cannot retrieve locked node %s" % self.op.pnode_uuid
    if pnode.offline:
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if pnode.drained:
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if not pnode.vm_capable:
      raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
                                 " '%s'" % pnode.name, errors.ECODE_STATE)

    self.secondaries = []

    # Fill in any IPs from IP pools. This must happen here, because we need to
    # know the nic's primary node, as specified by the iallocator
    for idx, nic in enumerate(self.nics):
      net_uuid = nic.network
      if net_uuid is not None:
        nobj = self.cfg.GetNetwork(net_uuid)
        netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.uuid)
        if netparams is None:
          raise errors.OpPrereqError("No netparams found for network"
                                     " %s. Probably not connected to"
                                     " node's %s nodegroup" %
                                     (nobj.name, self.pnode.name),
                                     errors.ECODE_INVAL)
        self.LogInfo("NIC/%d inherits netparams %s" %
                     (idx, netparams.values()))
        nic.nicparams = dict(netparams)
        if nic.ip is not None:
          if nic.ip.lower() == constants.NIC_IP_POOL:
            try:
              nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
                                         " from the address pool" % idx,
                                         errors.ECODE_STATE)
            self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name)
          else:
            try:
              self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId(),
                                 check=self.op.conflicts_check)
            except errors.ReservationError:
              raise errors.OpPrereqError("IP address %s already in use"
                                         " or does not belong to network %s" %
                                         (nic.ip, nobj.name),
                                         errors.ECODE_NOTUNIQUE)

      # net is None, ip None or given
      elif self.op.conflicts_check:
        _CheckForConflictingIp(self, nic.ip, self.pnode.uuid)

    # mirror node verification
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.snode_uuid == pnode.uuid:
        raise errors.OpPrereqError("The secondary node cannot be the"
                                   " primary node", errors.ECODE_INVAL)
      CheckNodeOnline(self, self.op.snode_uuid)
      CheckNodeNotDrained(self, self.op.snode_uuid)
      CheckNodeVmCapable(self, self.op.snode_uuid)
      self.secondaries.append(self.op.snode_uuid)

      snode = self.cfg.GetNodeInfo(self.op.snode_uuid)
      if pnode.group != snode.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    nodes = [pnode]
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      nodes.append(snode)
    has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
    excl_stor = compat.any(map(has_es, nodes))
    if excl_stor and not self.op.disk_template in constants.DTS_EXCL_STORAGE:
      raise errors.OpPrereqError("Disk template %s not supported with"
                                 " exclusive storage" % self.op.disk_template,
                                 errors.ECODE_STATE)
    for disk in self.disks:
      CheckSpindlesExclusiveStorage(disk, excl_stor, True)

    node_uuids = [pnode.uuid] + self.secondaries

    if not self.adopt_disks:
      if self.op.disk_template == constants.DT_RBD:
        # _CheckRADOSFreeSpace() is just a placeholder.
        # Any function that checks prerequisites can be placed here.
        # Check if there is enough space on the RADOS cluster.
        CheckRADOSFreeSpace()
      elif self.op.disk_template == constants.DT_EXT:
        # FIXME: Function that checks prereqs if needed
        pass
      elif self.op.disk_template in constants.DTS_LVM:
        # Check lv size requirements, if not adopting
        req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
        CheckNodesFreeDiskPerVG(self, node_uuids, req_sizes)
      else:
        # FIXME: add checks for other, non-adopting, non-lvm disk templates
        pass

    elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
      all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
                                disk[constants.IDISK_ADOPT])
                     for disk in self.disks])
      if len(all_lvs) != len(self.disks):
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
                                   errors.ECODE_INVAL)
      for lv_name in all_lvs:
        try:
          # FIXME: lv_name here is "vg/lv" need to ensure that other calls
          # to ReserveLV uses the same syntax
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("LV named %s used by another instance" %
                                     lv_name, errors.ECODE_NOTUNIQUE)

      vg_names = self.rpc.call_vg_list([pnode.uuid])[pnode.uuid]
      vg_names.Raise("Cannot get VG information from node %s" % pnode.name)

      node_lvs = self.rpc.call_lv_list([pnode.uuid],
                                       vg_names.payload.keys())[pnode.uuid]
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
      node_lvs = node_lvs.payload

      delta = all_lvs.difference(node_lvs.keys())
      if delta:
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
      if online_lvs:
        raise errors.OpPrereqError("Online logical volumes found, cannot"
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
                                   errors.ECODE_STATE)
      # update the size of disk based on what is found
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
                                        dsk[constants.IDISK_ADOPT])][0]))

    elif self.op.disk_template == constants.DT_BLOCK:
      # Normalize and de-duplicate device paths
      all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
                       for disk in self.disks])
      if len(all_disks) != len(self.disks):
        raise errors.OpPrereqError("Duplicate disk names given for adoption",
                                   errors.ECODE_INVAL)
      baddisks = [d for d in all_disks
                  if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
      if baddisks:
        raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
                                   " cannot be adopted" %
                                   (utils.CommaJoin(baddisks),
                                    constants.ADOPTABLE_BLOCKDEV_ROOT),
                                   errors.ECODE_INVAL)

      node_disks = self.rpc.call_bdev_sizes([pnode.uuid],
                                            list(all_disks))[pnode.uuid]
      node_disks.Raise("Cannot get block device information from node %s" %
                       pnode.name)
      node_disks = node_disks.payload
      delta = all_disks.difference(node_disks.keys())
      if delta:
        raise errors.OpPrereqError("Missing block device(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_disks[dsk[constants.IDISK_ADOPT]]))

    # Check disk access param to be compatible with specified hypervisor
    node_info = self.cfg.GetNodeInfo(self.op.pnode_uuid)
    node_group = self.cfg.GetNodeGroup(node_info.group)
    disk_params = self.cfg.GetGroupDiskParams(node_group)
    access_type = disk_params[self.op.disk_template].get(
      constants.RBD_ACCESS, constants.DISK_KERNELSPACE
    )

    if not IsValidDiskAccessModeCombination(self.op.hypervisor,
                                            self.op.disk_template,
                                            access_type):
      raise errors.OpPrereqError("Selected hypervisor (%s) cannot be"
                                 " used with %s disk access param" %
                                 (self.op.hypervisor, access_type),
                                 errors.ECODE_STATE)

    # Verify instance specs
    spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
    ispec = {
      constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
      constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
      constants.ISPEC_DISK_COUNT: len(self.disks),
      constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE]
                                  for disk in self.disks],
      constants.ISPEC_NIC_COUNT: len(self.nics),
      constants.ISPEC_SPINDLE_USE: spindle_use,
      }

    group_info = self.cfg.GetNodeGroup(pnode.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
    res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
                                               self.op.disk_template)
    if not self.op.ignore_ipolicy and res:
      msg = ("Instance allocation to group %s (%s) violates policy: %s" %
             (pnode.group, group_info.name, utils.CommaJoin(res)))
      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

    CheckHVParams(self, node_uuids, self.op.hypervisor, self.op.hvparams)

    CheckNodeHasOS(self, pnode.uuid, self.op.os_type, self.op.force_variant)
    # check OS parameters (remotely)
    CheckOSParams(self, True, node_uuids, self.op.os_type, self.os_full)

    CheckNicsBridgesExist(self, self.nics, self.pnode.uuid)

    #TODO: _CheckExtParams (remotely)
    # Check parameters for extstorage

    # memory check on primary node
    #TODO(dynmem): use MINMEM for checking
    if self.op.start:
      hvfull = objects.FillDict(cluster.hvparams.get(self.op.hypervisor, {}),
                                self.op.hvparams)
      CheckNodeFreeMemory(self, self.pnode.uuid,
                          "creating instance %s" % self.op.instance_name,
                          self.be_full[constants.BE_MAXMEM],
                          self.op.hypervisor, hvfull)

    self.dry_run_result = list(node_uuids)
1278

    
1279
  def Exec(self, feedback_fn):
1280
    """Create and add the instance to the cluster.
1281

1282
    """
1283
    assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
1284
                self.owned_locks(locking.LEVEL_NODE)), \
1285
      "Node locks differ from node resource locks"
1286
    assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)
1287

    
1288
    ht_kind = self.op.hypervisor
1289
    if ht_kind in constants.HTS_REQ_PORT:
1290
      network_port = self.cfg.AllocatePort()
1291
    else:
1292
      network_port = None
1293

    
1294
    instance_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
1295

    
1296
    # This is ugly but we got a chicken-egg problem here
1297
    # We can only take the group disk parameters, as the instance
1298
    # has no disks yet (we are generating them right here).
1299
    nodegroup = self.cfg.GetNodeGroup(self.pnode.group)
1300
    disks = GenerateDiskTemplate(self,
1301
                                 self.op.disk_template,
1302
                                 instance_uuid, self.pnode.uuid,
1303
                                 self.secondaries,
1304
                                 self.disks,
1305
                                 self.instance_file_storage_dir,
1306
                                 self.op.file_driver,
1307
                                 0,
1308
                                 feedback_fn,
1309
                                 self.cfg.GetGroupDiskParams(nodegroup))
1310

    
1311
    iobj = objects.Instance(name=self.op.instance_name,
1312
                            uuid=instance_uuid,
1313
                            os=self.op.os_type,
1314
                            primary_node=self.pnode.uuid,
1315
                            nics=self.nics, disks=disks,
1316
                            disk_template=self.op.disk_template,
1317
                            disks_active=False,
1318
                            admin_state=constants.ADMINST_DOWN,
1319
                            network_port=network_port,
1320
                            beparams=self.op.beparams,
1321
                            hvparams=self.op.hvparams,
1322
                            hypervisor=self.op.hypervisor,
1323
                            osparams=self.op.osparams,
1324
                            )
1325

    
1326
    if self.op.tags:
1327
      for tag in self.op.tags:
1328
        iobj.AddTag(tag)
1329

    
1330
    if self.adopt_disks:
1331
      if self.op.disk_template == constants.DT_PLAIN:
1332
        # rename LVs to the newly-generated names; we need to construct
1333
        # 'fake' LV disks with the old data, plus the new unique_id
1334
        tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
1335
        rename_to = []
1336
        for t_dsk, a_dsk in zip(tmp_disks, self.disks):
1337
          rename_to.append(t_dsk.logical_id)
1338
          t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
1339
        result = self.rpc.call_blockdev_rename(self.pnode.uuid,
1340
                                               zip(tmp_disks, rename_to))
1341
        result.Raise("Failed to rename adoped LVs")
1342
    else:
1343
      feedback_fn("* creating instance disks...")
1344
      try:
1345
        CreateDisks(self, iobj)
1346
      except errors.OpExecError:
1347
        self.LogWarning("Device creation failed")
1348
        self.cfg.ReleaseDRBDMinors(self.op.instance_name)
1349
        raise
1350

    
1351
    feedback_fn("adding instance %s to cluster config" % self.op.instance_name)
1352

    
1353
    self.cfg.AddInstance(iobj, self.proc.GetECId())
1354

    
1355
    # Declare that we don't want to remove the instance lock anymore, as we've
1356
    # added the instance to the config
1357
    del self.remove_locks[locking.LEVEL_INSTANCE]
1358

    
1359
    if self.op.mode == constants.INSTANCE_IMPORT:
1360
      # Release unused nodes
1361
      ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node_uuid])
1362
    else:
1363
      # Release all nodes
1364
      ReleaseLocks(self, locking.LEVEL_NODE)
1365

    
1366
    disk_abort = False
1367
    if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
1368
      feedback_fn("* wiping instance disks...")
1369
      try:
1370
        WipeDisks(self, iobj)
1371
      except errors.OpExecError, err:
1372
        logging.exception("Wiping disks failed")
1373
        self.LogWarning("Wiping instance disks failed (%s)", err)
1374
        disk_abort = True
1375

    
1376
    if disk_abort:
1377
      # Something is already wrong with the disks, don't do anything else
1378
      pass
1379
    elif self.op.wait_for_sync:
1380
      disk_abort = not WaitForSync(self, iobj)
1381
    elif iobj.disk_template in constants.DTS_INT_MIRROR:
1382
      # make sure the disks are not degraded (still sync-ing is ok)
1383
      feedback_fn("* checking mirrors status")
1384
      disk_abort = not WaitForSync(self, iobj, oneshot=True)
1385
    else:
1386
      disk_abort = False
1387

    
1388
    if disk_abort:
1389
      RemoveDisks(self, iobj)
1390
      self.cfg.RemoveInstance(iobj.uuid)
1391
      # Make sure the instance lock gets removed
1392
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
1393
      raise errors.OpExecError("There are some degraded disks for"
1394
                               " this instance")
1395

    
1396
    # instance disks are now active
1397
    iobj.disks_active = True
1398

    
1399
    # Release all node resource locks
1400
    ReleaseLocks(self, locking.LEVEL_NODE_RES)
1401

    
1402
    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
1403
      if self.op.mode == constants.INSTANCE_CREATE:
1404
        if not self.op.no_install:
1405
          pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
1406
                        not self.op.wait_for_sync)
1407
          if pause_sync:
1408
            feedback_fn("* pausing disk sync to install instance OS")
1409
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
1410
                                                              (iobj.disks,
1411
                                                               iobj), True)
1412
            for idx, success in enumerate(result.payload):
1413
              if not success:
1414
                logging.warn("pause-sync of instance %s for disk %d failed",
1415
                             self.op.instance_name, idx)
1416

    
1417
          feedback_fn("* running the instance OS create scripts...")
1418
          # FIXME: pass debug option from opcode to backend
1419
          os_add_result = \
1420
            self.rpc.call_instance_os_add(self.pnode.uuid, (iobj, None), False,
1421
                                          self.op.debug_level)
1422
          if pause_sync:
1423
            feedback_fn("* resuming disk sync")
1424
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
1425
                                                              (iobj.disks,
1426
                                                               iobj), False)
1427
            for idx, success in enumerate(result.payload):
1428
              if not success:
1429
                logging.warn("resume-sync of instance %s for disk %d failed",
1430
                             self.op.instance_name, idx)
1431

    
1432
          os_add_result.Raise("Could not add os for instance %s"
1433
                              " on node %s" % (self.op.instance_name,
1434
                                               self.pnode.name))
1435

    
1436
      else:
1437
        if self.op.mode == constants.INSTANCE_IMPORT:
1438
          feedback_fn("* running the instance OS import scripts...")
1439

    
1440
          transfers = []
1441

    
1442
          for idx, image in enumerate(self.src_images):
1443
            if not image:
1444
              continue
1445

    
1446
            # FIXME: pass debug option from opcode to backend
1447
            dt = masterd.instance.DiskTransfer("disk/%s" % idx,
1448
                                               constants.IEIO_FILE, (image, ),
1449
                                               constants.IEIO_SCRIPT,
1450
                                               ((iobj.disks[idx], iobj), idx),
1451
                                               None)
1452
            transfers.append(dt)
1453

    
1454
          import_result = \
1455
            masterd.instance.TransferInstanceData(self, feedback_fn,
1456
                                                  self.op.src_node_uuid,
1457
                                                  self.pnode.uuid,
1458
                                                  self.pnode.secondary_ip,
1459
                                                  self.op.compress,
1460
                                                  iobj, transfers)
1461
          if not compat.all(import_result):
1462
            self.LogWarning("Some disks for instance %s on node %s were not"
1463
                            " imported successfully" % (self.op.instance_name,
1464
                                                        self.pnode.name))
1465

    
1466
          rename_from = self._old_instance_name
1467

    
1468
        elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
1469
          feedback_fn("* preparing remote import...")
1470
          # The source cluster will stop the instance before attempting to make
1471
          # a connection. In some cases stopping an instance can take a long
1472
          # time, hence the shutdown timeout is added to the connection
1473
          # timeout.
1474
          connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
1475
                             self.op.source_shutdown_timeout)
1476
          timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
1477

    
1478
          assert iobj.primary_node == self.pnode.uuid
1479
          disk_results = \
1480
            masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
1481
                                          self.source_x509_ca,
1482
                                          self._cds, self.op.compress, timeouts)
1483
          if not compat.all(disk_results):
1484
            # TODO: Should the instance still be started, even if some disks
1485
            # failed to import (valid for local imports, too)?
1486
            self.LogWarning("Some disks for instance %s on node %s were not"
1487
                            " imported successfully" % (self.op.instance_name,
1488
                                                        self.pnode.name))
1489

    
1490
          rename_from = self.source_instance_name
1491

    
1492
        else:
1493
          # also checked in the prereq part
1494
          raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
1495
                                       % self.op.mode)
1496

    
1497
        # Run rename script on newly imported instance
1498
        assert iobj.name == self.op.instance_name
1499
        feedback_fn("Running rename script for %s" % self.op.instance_name)
1500
        result = self.rpc.call_instance_run_rename(self.pnode.uuid, iobj,
1501
                                                   rename_from,
1502
                                                   self.op.debug_level)
1503
        result.Warn("Failed to run rename script for %s on node %s" %
1504
                    (self.op.instance_name, self.pnode.name), self.LogWarning)
1505

    
1506
    assert not self.owned_locks(locking.LEVEL_NODE_RES)
1507

    
1508
    if self.op.start:
1509
      iobj.admin_state = constants.ADMINST_UP
1510
      self.cfg.Update(iobj, feedback_fn)
1511
      logging.info("Starting instance %s on node %s", self.op.instance_name,
1512
                   self.pnode.name)
1513
      feedback_fn("* starting instance...")
1514
      result = self.rpc.call_instance_start(self.pnode.uuid, (iobj, None, None),
1515
                                            False, self.op.reason)
1516
      result.Raise("Could not start instance")
1517

    
1518
    return list(iobj.all_nodes)
1519

    
1520

    
1521
class LUInstanceRename(LogicalUnit):
1522
  """Rename an instance.
1523

1524
  """
1525
  HPATH = "instance-rename"
1526
  HTYPE = constants.HTYPE_INSTANCE
1527

    
1528
  def CheckArguments(self):
1529
    """Check arguments.
1530

1531
    """
1532
    if self.op.ip_check and not self.op.name_check:
1533
      # TODO: make the ip check more flexible and not depend on the name check
1534
      raise errors.OpPrereqError("IP address check requires a name check",
1535
                                 errors.ECODE_INVAL)
1536

    
1537
  def BuildHooksEnv(self):
1538
    """Build hooks env.
1539

1540
    This runs on master, primary and secondary nodes of the instance.
1541

1542
    """
1543
    env = BuildInstanceHookEnvByObject(self, self.instance)
1544
    env["INSTANCE_NEW_NAME"] = self.op.new_name
1545
    return env
1546

    
1547
  def BuildHooksNodes(self):
1548
    """Build hooks nodes.
1549

1550
    """
1551
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
1552
    return (nl, nl)
1553

    
1554
  def CheckPrereq(self):
1555
    """Check prerequisites.
1556

1557
    This checks that the instance is in the cluster and is not running.
1558

1559
    """
1560
    (self.op.instance_uuid, self.op.instance_name) = \
1561
      ExpandInstanceUuidAndName(self.cfg, self.op.instance_uuid,
1562
                                self.op.instance_name)
1563
    instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1564
    assert instance is not None
1565

    
1566
    # It should actually not happen that an instance is running with a disabled
1567
    # disk template, but in case it does, the renaming of file-based instances
1568
    # will fail horribly. Thus, we test it before.
1569
    if (instance.disk_template in constants.DTS_FILEBASED and
1570
        self.op.new_name != instance.name):
1571
      CheckDiskTemplateEnabled(self.cfg.GetClusterInfo(),
1572
                               instance.disk_template)
1573

    
1574
    CheckNodeOnline(self, instance.primary_node)
1575
    CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
1576
                       msg="cannot rename")
1577
    self.instance = instance
1578

    
1579
    new_name = self.op.new_name
1580
    if self.op.name_check:
1581
      hostname = _CheckHostnameSane(self, new_name)
1582
      new_name = self.op.new_name = hostname.name
1583
      if (self.op.ip_check and
1584
          netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
1585
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
1586
                                   (hostname.ip, new_name),
1587
                                   errors.ECODE_NOTUNIQUE)
1588

    
1589
    instance_names = [inst.name for
1590
                      inst in self.cfg.GetAllInstancesInfo().values()]
1591
    if new_name in instance_names and new_name != instance.name:
1592
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
1593
                                 new_name, errors.ECODE_EXISTS)
1594

    
1595
  def Exec(self, feedback_fn):
1596
    """Rename the instance.
1597

1598
    """
1599
    old_name = self.instance.name
1600

    
1601
    rename_file_storage = False
1602
    if (self.instance.disk_template in (constants.DT_FILE,
1603
                                        constants.DT_SHARED_FILE) and
1604
        self.op.new_name != self.instance.name):
1605
      old_file_storage_dir = os.path.dirname(
1606
                               self.instance.disks[0].logical_id[1])
1607
      rename_file_storage = True
1608

    
1609
    self.cfg.RenameInstance(self.instance.uuid, self.op.new_name)
1610
    # Change the instance lock. This is definitely safe while we hold the BGL.
1611
    # Otherwise the new lock would have to be added in acquired mode.
1612
    assert self.REQ_BGL
1613
    assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER)
1614
    self.glm.remove(locking.LEVEL_INSTANCE, old_name)
1615
    self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
1616

    
1617
    # re-read the instance from the configuration after rename
1618
    renamed_inst = self.cfg.GetInstanceInfo(self.instance.uuid)
1619

    
1620
    if rename_file_storage:
1621
      new_file_storage_dir = os.path.dirname(
1622
                               renamed_inst.disks[0].logical_id[1])
1623
      result = self.rpc.call_file_storage_dir_rename(renamed_inst.primary_node,
1624
                                                     old_file_storage_dir,
1625
                                                     new_file_storage_dir)
1626
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
1627
                   " (but the instance has been renamed in Ganeti)" %
1628
                   (self.cfg.GetNodeName(renamed_inst.primary_node),
1629
                    old_file_storage_dir, new_file_storage_dir))
1630

    
1631
    StartInstanceDisks(self, renamed_inst, None)
1632
    # update info on disks
1633
    info = GetInstanceInfoText(renamed_inst)
1634
    for (idx, disk) in enumerate(renamed_inst.disks):
1635
      for node_uuid in renamed_inst.all_nodes:
1636
        result = self.rpc.call_blockdev_setinfo(node_uuid,
1637
                                                (disk, renamed_inst), info)
1638
        result.Warn("Error setting info on node %s for disk %s" %
1639
                    (self.cfg.GetNodeName(node_uuid), idx), self.LogWarning)
1640
    try:
1641
      result = self.rpc.call_instance_run_rename(renamed_inst.primary_node,
1642
                                                 renamed_inst, old_name,
1643
                                                 self.op.debug_level)
1644
      result.Warn("Could not run OS rename script for instance %s on node %s"
1645
                  " (but the instance has been renamed in Ganeti)" %
1646
                  (renamed_inst.name,
1647
                   self.cfg.GetNodeName(renamed_inst.primary_node)),
1648
                  self.LogWarning)
1649
    finally:
1650
      ShutdownInstanceDisks(self, renamed_inst)
1651

    
1652
    return renamed_inst.name
1653

    
1654

    
1655
class LUInstanceRemove(LogicalUnit):
1656
  """Remove an instance.
1657

1658
  """
1659
  HPATH = "instance-remove"
1660
  HTYPE = constants.HTYPE_INSTANCE
1661
  REQ_BGL = False
1662

    
1663
  def ExpandNames(self):
1664
    self._ExpandAndLockInstance()
1665
    self.needed_locks[locking.LEVEL_NODE] = []
1666
    self.needed_locks[locking.LEVEL_NODE_RES] = []
1667
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
1668

    
1669
  def DeclareLocks(self, level):
1670
    if level == locking.LEVEL_NODE:
1671
      self._LockInstancesNodes()
1672
    elif level == locking.LEVEL_NODE_RES:
1673
      # Copy node locks
1674
      self.needed_locks[locking.LEVEL_NODE_RES] = \
1675
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1676

    
1677
  def BuildHooksEnv(self):
1678
    """Build hooks env.
1679

1680
    This runs on master, primary and secondary nodes of the instance.
1681

1682
    """
1683
    env = BuildInstanceHookEnvByObject(self, self.instance)
1684
    env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
1685
    return env
1686

    
1687
  def BuildHooksNodes(self):
1688
    """Build hooks nodes.
1689

1690
    """
1691
    nl = [self.cfg.GetMasterNode()]
1692
    nl_post = list(self.instance.all_nodes) + nl
1693
    return (nl, nl_post)
1694

    
1695
  def CheckPrereq(self):
1696
    """Check prerequisites.
1697

1698
    This checks that the instance is in the cluster.
1699

1700
    """
1701
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1702
    assert self.instance is not None, \
1703
      "Cannot retrieve locked instance %s" % self.op.instance_name
1704

    
1705
  def Exec(self, feedback_fn):
1706
    """Remove the instance.
1707

1708
    """
1709
    logging.info("Shutting down instance %s on node %s", self.instance.name,
1710
                 self.cfg.GetNodeName(self.instance.primary_node))
1711

    
1712
    result = self.rpc.call_instance_shutdown(self.instance.primary_node,
1713
                                             self.instance,
1714
                                             self.op.shutdown_timeout,
1715
                                             self.op.reason)
1716
    if self.op.ignore_failures:
1717
      result.Warn("Warning: can't shutdown instance", feedback_fn)
1718
    else:
1719
      result.Raise("Could not shutdown instance %s on node %s" %
1720
                   (self.instance.name,
1721
                    self.cfg.GetNodeName(self.instance.primary_node)))
1722

    
1723
    assert (self.owned_locks(locking.LEVEL_NODE) ==
1724
            self.owned_locks(locking.LEVEL_NODE_RES))
1725
    assert not (set(self.instance.all_nodes) -
1726
                self.owned_locks(locking.LEVEL_NODE)), \
1727
      "Not owning correct locks"
1728

    
1729
    RemoveInstance(self, feedback_fn, self.instance, self.op.ignore_failures)
1730

    
1731

    
1732
class LUInstanceMove(LogicalUnit):
1733
  """Move an instance by data-copying.
1734

1735
  """
1736
  HPATH = "instance-move"
1737
  HTYPE = constants.HTYPE_INSTANCE
1738
  REQ_BGL = False
1739

    
1740
  def ExpandNames(self):
1741
    self._ExpandAndLockInstance()
1742
    (self.op.target_node_uuid, self.op.target_node) = \
1743
      ExpandNodeUuidAndName(self.cfg, self.op.target_node_uuid,
1744
                            self.op.target_node)
1745
    self.needed_locks[locking.LEVEL_NODE] = [self.op.target_node_uuid]
1746
    self.needed_locks[locking.LEVEL_NODE_RES] = []
1747
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
1748

    
1749
  def DeclareLocks(self, level):
1750
    if level == locking.LEVEL_NODE:
1751
      self._LockInstancesNodes(primary_only=True)
1752
    elif level == locking.LEVEL_NODE_RES:
1753
      # Copy node locks
1754
      self.needed_locks[locking.LEVEL_NODE_RES] = \
1755
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1756

    
1757
  def BuildHooksEnv(self):
1758
    """Build hooks env.
1759

1760
    This runs on master, primary and target nodes of the instance.
1761

1762
    """
1763
    env = {
1764
      "TARGET_NODE": self.op.target_node,
1765
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
1766
      }
1767
    env.update(BuildInstanceHookEnvByObject(self, self.instance))
1768
    return env
1769

    
1770
  def BuildHooksNodes(self):
1771
    """Build hooks nodes.
1772

1773
    """
1774
    nl = [
1775
      self.cfg.GetMasterNode(),
1776
      self.instance.primary_node,
1777
      self.op.target_node_uuid,
1778
      ]
1779
    return (nl, nl)
1780

    
1781
  def CheckPrereq(self):
1782
    """Check prerequisites.
1783

1784
    This checks that the instance is in the cluster.
1785

1786
    """
1787
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1788
    assert self.instance is not None, \
1789
      "Cannot retrieve locked instance %s" % self.op.instance_name
1790

    
1791
    if self.instance.disk_template not in constants.DTS_COPYABLE:
1792
      raise errors.OpPrereqError("Disk template %s not suitable for copying" %
1793
                                 self.instance.disk_template,
1794
                                 errors.ECODE_STATE)
1795

    
1796
    target_node = self.cfg.GetNodeInfo(self.op.target_node_uuid)
1797
    assert target_node is not None, \
1798
      "Cannot retrieve locked node %s" % self.op.target_node
1799

    
1800
    self.target_node_uuid = target_node.uuid
1801
    if target_node.uuid == self.instance.primary_node:
1802
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
1803
                                 (self.instance.name, target_node.name),
1804
                                 errors.ECODE_STATE)
1805

    
1806
    cluster = self.cfg.GetClusterInfo()
1807
    bep = cluster.FillBE(self.instance)
1808

    
1809
    for idx, dsk in enumerate(self.instance.disks):
1810
      if dsk.dev_type not in (constants.DT_PLAIN, constants.DT_FILE,
1811
                              constants.DT_SHARED_FILE, constants.DT_GLUSTER):
1812
        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
1813
                                   " cannot copy" % idx, errors.ECODE_STATE)
1814

    
1815
    CheckNodeOnline(self, target_node.uuid)
1816
    CheckNodeNotDrained(self, target_node.uuid)
1817
    CheckNodeVmCapable(self, target_node.uuid)
1818
    group_info = self.cfg.GetNodeGroup(target_node.group)
1819
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
1820
    CheckTargetNodeIPolicy(self, ipolicy, self.instance, target_node, self.cfg,
1821
                           ignore=self.op.ignore_ipolicy)
1822

    
1823
    if self.instance.admin_state == constants.ADMINST_UP:
1824
      # check memory requirements on the target node
1825
      CheckNodeFreeMemory(
1826
          self, target_node.uuid, "failing over instance %s" %
1827
          self.instance.name, bep[constants.BE_MAXMEM],
1828
          self.instance.hypervisor,
1829
          cluster.hvparams[self.instance.hypervisor])
1830
    else:
1831
      self.LogInfo("Not checking memory on the secondary node as"
1832
                   " instance will not be started")
1833

    
1834
    # check bridge existance
1835
    CheckInstanceBridgesExist(self, self.instance, node_uuid=target_node.uuid)
1836

    
1837
  def Exec(self, feedback_fn):
1838
    """Move an instance.
1839

1840
    The move is done by shutting it down on its present node, copying
1841
    the data over (slow) and starting it on the new node.
1842

1843
    """
1844
    source_node = self.cfg.GetNodeInfo(self.instance.primary_node)
1845
    target_node = self.cfg.GetNodeInfo(self.target_node_uuid)
1846

    
1847
    self.LogInfo("Shutting down instance %s on source node %s",
1848
                 self.instance.name, source_node.name)
1849

    
1850
    assert (self.owned_locks(locking.LEVEL_NODE) ==
1851
            self.owned_locks(locking.LEVEL_NODE_RES))
1852

    
1853
    result = self.rpc.call_instance_shutdown(source_node.uuid, self.instance,
1854
                                             self.op.shutdown_timeout,
1855
                                             self.op.reason)
1856
    if self.op.ignore_consistency:
1857
      result.Warn("Could not shutdown instance %s on node %s. Proceeding"
1858
                  " anyway. Please make sure node %s is down. Error details" %
1859
                  (self.instance.name, source_node.name, source_node.name),
1860
                  self.LogWarning)
1861
    else:
1862
      result.Raise("Could not shutdown instance %s on node %s" %
1863
                   (self.instance.name, source_node.name))
1864

    
1865
    # create the target disks
1866
    try:
1867
      CreateDisks(self, self.instance, target_node_uuid=target_node.uuid)
1868
    except errors.OpExecError:
1869
      self.LogWarning("Device creation failed")
1870
      self.cfg.ReleaseDRBDMinors(self.instance.uuid)
1871
      raise
1872

    
1873
    errs = []
1874
    transfers = []
1875
    # activate, get path, create transfer jobs
1876
    for idx, disk in enumerate(self.instance.disks):
1877
      # FIXME: pass debug option from opcode to backend
1878
      dt = masterd.instance.DiskTransfer("disk/%s" % idx,
1879
                                         constants.IEIO_RAW_DISK,
1880
                                         (disk, self.instance),
1881
                                         constants.IEIO_RAW_DISK,
1882
                                         (disk, self.instance),
1883
                                         None)
1884
      transfers.append(dt)
1885

    
1886
    import_result = \
1887
      masterd.instance.TransferInstanceData(self, feedback_fn,
1888
                                            source_node.uuid,
1889
                                            target_node.uuid,
1890
                                            target_node.secondary_ip,
1891
                                            self.op.compress,
1892
                                            self.instance, transfers)
1893
    if not compat.all(import_result):
1894
      errs.append("Failed to transfer instance data")
1895

    
1896
    if errs:
1897
      self.LogWarning("Some disks failed to copy, aborting")
1898
      try:
1899
        RemoveDisks(self, self.instance, target_node_uuid=target_node.uuid)
1900
      finally:
1901
        self.cfg.ReleaseDRBDMinors(self.instance.uuid)
1902
        raise errors.OpExecError("Errors during disk copy: %s" %
1903
                                 (",".join(errs),))
1904

    
1905
    self.instance.primary_node = target_node.uuid
1906
    self.cfg.Update(self.instance, feedback_fn)
1907

    
1908
    self.LogInfo("Removing the disks on the original node")
1909
    RemoveDisks(self, self.instance, target_node_uuid=source_node.uuid)
1910

    
1911
    # Only start the instance if it's marked as up
1912
    if self.instance.admin_state == constants.ADMINST_UP:
1913
      self.LogInfo("Starting instance %s on node %s",
1914
                   self.instance.name, target_node.name)
1915

    
1916
      disks_ok, _ = AssembleInstanceDisks(self, self.instance,
1917
                                          ignore_secondaries=True)
1918
      if not disks_ok:
1919
        ShutdownInstanceDisks(self, self.instance)
1920
        raise errors.OpExecError("Can't activate the instance's disks")
1921

    
1922
      result = self.rpc.call_instance_start(target_node.uuid,
1923
                                            (self.instance, None, None), False,
1924
                                            self.op.reason)
1925
      msg = result.fail_msg
1926
      if msg:
1927
        ShutdownInstanceDisks(self, self.instance)
1928
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
1929
                                 (self.instance.name, target_node.name, msg))
1930

    
1931

    
1932
class LUInstanceMultiAlloc(NoHooksLU):
1933
  """Allocates multiple instances at the same time.
1934

1935
  """
1936
  REQ_BGL = False
1937

    
1938
  def CheckArguments(self):
1939
    """Check arguments.
1940

1941
    """
1942
    nodes = []
1943
    for inst in self.op.instances:
1944
      if inst.iallocator is not None:
1945
        raise errors.OpPrereqError("iallocator are not allowed to be set on"
1946
                                   " instance objects", errors.ECODE_INVAL)
1947
      nodes.append(bool(inst.pnode))
1948
      if inst.disk_template in constants.DTS_INT_MIRROR:
1949
        nodes.append(bool(inst.snode))
1950

    
1951
    has_nodes = compat.any(nodes)
1952
    if compat.all(nodes) ^ has_nodes:
1953
      raise errors.OpPrereqError("There are instance objects providing"
1954
                                 " pnode/snode while others do not",
1955
                                 errors.ECODE_INVAL)
1956

    
1957
    if not has_nodes and self.op.iallocator is None:
1958
      default_iallocator = self.cfg.GetDefaultIAllocator()
1959
      if default_iallocator:
1960
        self.op.iallocator = default_iallocator
1961
      else:
1962
        raise errors.OpPrereqError("No iallocator or nodes on the instances"
1963
                                   " given and no cluster-wide default"
1964
                                   " iallocator found; please specify either"
1965
                                   " an iallocator or nodes on the instances"
1966
                                   " or set a cluster-wide default iallocator",
1967
                                   errors.ECODE_INVAL)
1968

    
1969
    _CheckOpportunisticLocking(self.op)
1970

    
1971
    dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
1972
    if dups:
1973
      raise errors.OpPrereqError("There are duplicate instance names: %s" %
1974
                                 utils.CommaJoin(dups), errors.ECODE_INVAL)
1975

    
1976
  def ExpandNames(self):
1977
    """Calculate the locks.
1978

1979
    """
1980
    self.share_locks = ShareAll()
1981
    self.needed_locks = {
1982
      # iallocator will select nodes and even if no iallocator is used,
1983
      # collisions with LUInstanceCreate should be avoided
1984
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
1985
      }
1986

    
1987
    if self.op.iallocator:
1988
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
1989
      self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET
1990

    
1991
      if self.op.opportunistic_locking:
1992
        self.opportunistic_locks[locking.LEVEL_NODE] = True
1993
    else:
1994
      nodeslist = []
1995
      for inst in self.op.instances:
1996
        (inst.pnode_uuid, inst.pnode) = \
1997
          ExpandNodeUuidAndName(self.cfg, inst.pnode_uuid, inst.pnode)
1998
        nodeslist.append(inst.pnode_uuid)
1999
        if inst.snode is not None:
2000
          (inst.snode_uuid, inst.snode) = \
2001
            ExpandNodeUuidAndName(self.cfg, inst.snode_uuid, inst.snode)
2002
          nodeslist.append(inst.snode_uuid)
2003

    
2004
      self.needed_locks[locking.LEVEL_NODE] = nodeslist
2005
      # Lock resources of instance's primary and secondary nodes (copy to
2006
      # prevent accidential modification)
2007
      self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)
2008

    
2009
  def DeclareLocks(self, level):
2010
    if level == locking.LEVEL_NODE_RES and \
2011
      self.opportunistic_locks[locking.LEVEL_NODE]:
2012
      # Even when using opportunistic locking, we require the same set of
2013
      # NODE_RES locks as we got NODE locks
2014
      self.needed_locks[locking.LEVEL_NODE_RES] = \
2015
        self.owned_locks(locking.LEVEL_NODE)
2016

    
2017
  def CheckPrereq(self):
2018
    """Check prerequisite.
2019

2020
    """
2021
    if self.op.iallocator:
2022
      cluster = self.cfg.GetClusterInfo()
2023
      default_vg = self.cfg.GetVGName()
2024
      ec_id = self.proc.GetECId()
2025

    
2026
      if self.op.opportunistic_locking:
2027
        # Only consider nodes for which a lock is held
2028
        node_whitelist = self.cfg.GetNodeNames(
2029
                           list(self.owned_locks(locking.LEVEL_NODE)))
2030
      else:
2031
        node_whitelist = None
2032

    
2033
      insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
2034
                                           _ComputeNics(op, cluster, None,
2035
                                                        self.cfg, ec_id),
2036
                                           _ComputeFullBeParams(op, cluster),
2037
                                           node_whitelist)
2038
               for op in self.op.instances]
2039

    
2040
      req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
2041
      ial = iallocator.IAllocator(self.cfg, self.rpc, req)
2042

    
2043
      ial.Run(self.op.iallocator)
2044

    
2045
      if not ial.success:
2046
        raise errors.OpPrereqError("Can't compute nodes using"
2047
                                   " iallocator '%s': %s" %
2048
                                   (self.op.iallocator, ial.info),
2049
                                   errors.ECODE_NORES)
2050

    
2051
      self.ia_result = ial.result
2052

    
2053
    if self.op.dry_run:
2054
      self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
2055
        constants.JOB_IDS_KEY: [],
2056
        })
2057

    
2058
  def _ConstructPartialResult(self):
2059
    """Contructs the partial result.
2060

2061
    """
2062
    if self.op.iallocator:
2063
      (allocatable, failed_insts) = self.ia_result
2064
      allocatable_insts = map(compat.fst, allocatable)
2065
    else:
2066
      allocatable_insts = [op.instance_name for op in self.op.instances]
2067
      failed_insts = []
2068

    
2069
    return {
2070
      constants.ALLOCATABLE_KEY: allocatable_insts,
2071
      constants.FAILED_KEY: failed_insts,
2072
      }
2073

    
2074
  def Exec(self, feedback_fn):
2075
    """Executes the opcode.
2076

2077
    """
2078
    jobs = []
2079
    if self.op.iallocator:
2080
      op2inst = dict((op.instance_name, op) for op in self.op.instances)
2081
      (allocatable, failed) = self.ia_result
2082

    
2083
      for (name, node_names) in allocatable:
2084
        op = op2inst.pop(name)
2085

    
2086
        (op.pnode_uuid, op.pnode) = \
2087
          ExpandNodeUuidAndName(self.cfg, None, node_names[0])
2088
        if len(node_names) > 1:
2089
          (op.snode_uuid, op.snode) = \
2090
            ExpandNodeUuidAndName(self.cfg, None, node_names[1])
2091

    
2092
          jobs.append([op])
2093

    
2094
        missing = set(op2inst.keys()) - set(failed)
2095
        assert not missing, \
2096
          "Iallocator did return incomplete result: %s" % \
2097
          utils.CommaJoin(missing)
2098
    else:
2099
      jobs.extend([op] for op in self.op.instances)
2100

    
2101
    return ResultWithJobs(jobs, **self._ConstructPartialResult())
2102

    
2103

    
2104
class _InstNicModPrivate:
2105
  """Data structure for network interface modifications.
2106

2107
  Used by L{LUInstanceSetParams}.
2108

2109
  """
2110
  def __init__(self):
2111
    self.params = None
2112
    self.filled = None
2113

    
2114

    
2115
def _PrepareContainerMods(mods, private_fn):
2116
  """Prepares a list of container modifications by adding a private data field.
2117

2118
  @type mods: list of tuples; (operation, index, parameters)
2119
  @param mods: List of modifications
2120
  @type private_fn: callable or None
2121
  @param private_fn: Callable for constructing a private data field for a
2122
    modification
2123
  @rtype: list
2124

2125
  """
2126
  if private_fn is None:
2127
    fn = lambda: None
2128
  else:
2129
    fn = private_fn
2130

    
2131
  return [(op, idx, params, fn()) for (op, idx, params) in mods]
2132

    
2133

    
2134
def _CheckNodesPhysicalCPUs(lu, node_uuids, requested, hypervisor_specs):
2135
  """Checks if nodes have enough physical CPUs
2136

2137
  This function checks if all given nodes have the needed number of
2138
  physical CPUs. In case any node has less CPUs or we cannot get the
2139
  information from the node, this function raises an OpPrereqError
2140
  exception.
2141

2142
  @type lu: C{LogicalUnit}
2143
  @param lu: a logical unit from which we get configuration data
2144
  @type node_uuids: C{list}
2145
  @param node_uuids: the list of node UUIDs to check
2146
  @type requested: C{int}
2147
  @param requested: the minimum acceptable number of physical CPUs
2148
  @type hypervisor_specs: list of pairs (string, dict of strings)
2149
  @param hypervisor_specs: list of hypervisor specifications in
2150
      pairs (hypervisor_name, hvparams)
2151
  @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
2152
      or we cannot check the node
2153

2154
  """
2155
  nodeinfo = lu.rpc.call_node_info(node_uuids, None, hypervisor_specs)
2156
  for node_uuid in node_uuids:
2157
    info = nodeinfo[node_uuid]
2158
    node_name = lu.cfg.GetNodeName(node_uuid)
2159
    info.Raise("Cannot get current information from node %s" % node_name,
2160
               prereq=True, ecode=errors.ECODE_ENVIRON)
2161
    (_, _, (hv_info, )) = info.payload
2162
    num_cpus = hv_info.get("cpu_total", None)
2163
    if not isinstance(num_cpus, int):
2164
      raise errors.OpPrereqError("Can't compute the number of physical CPUs"
2165
                                 " on node %s, result was '%s'" %
2166
                                 (node_name, num_cpus), errors.ECODE_ENVIRON)
2167
    if requested > num_cpus:
2168
      raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
2169
                                 "required" % (node_name, num_cpus, requested),
2170
                                 errors.ECODE_NORES)
2171

    
2172

    
2173
def GetItemFromContainer(identifier, kind, container):
2174
  """Return the item refered by the identifier.
2175

2176
  @type identifier: string
2177
  @param identifier: Item index or name or UUID
2178
  @type kind: string
2179
  @param kind: One-word item description
2180
  @type container: list
2181
  @param container: Container to get the item from
2182

2183
  """
2184
  # Index
2185
  try:
2186
    idx = int(identifier)
2187
    if idx == -1:
2188
      # Append
2189
      absidx = len(container) - 1
2190
    elif idx < 0:
2191
      raise IndexError("Not accepting negative indices other than -1")
2192
    elif idx > len(container):
2193
      raise IndexError("Got %s index %s, but there are only %s" %
2194
                       (kind, idx, len(container)))
2195
    else:
2196
      absidx = idx
2197
    return (absidx, container[idx])
2198
  except ValueError:
2199
    pass
2200

    
2201
  for idx, item in enumerate(container):
2202
    if item.uuid == identifier or item.name == identifier:
2203
      return (idx, item)
2204

    
2205
  raise errors.OpPrereqError("Cannot find %s with identifier %s" %
2206
                             (kind, identifier), errors.ECODE_NOENT)
2207

    
2208

    
2209
def _ApplyContainerMods(kind, container, chgdesc, mods,
2210
                        create_fn, modify_fn, remove_fn,
2211
                        post_add_fn=None):
2212
  """Applies descriptions in C{mods} to C{container}.
2213

2214
  @type kind: string
2215
  @param kind: One-word item description
2216
  @type container: list
2217
  @param container: Container to modify
2218
  @type chgdesc: None or list
2219
  @param chgdesc: List of applied changes
2220
  @type mods: list
2221
  @param mods: Modifications as returned by L{_PrepareContainerMods}
2222
  @type create_fn: callable
2223
  @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
2224
    receives absolute item index, parameters and private data object as added
2225
    by L{_PrepareContainerMods}, returns tuple containing new item and changes
2226
    as list
2227
  @type modify_fn: callable
2228
  @param modify_fn: Callback for modifying an existing item
2229
    (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
2230
    and private data object as added by L{_PrepareContainerMods}, returns
2231
    changes as list
2232
  @type remove_fn: callable
2233
  @param remove_fn: Callback on removing item; receives absolute item index,
2234
    item and private data object as added by L{_PrepareContainerMods}
2235
  @type post_add_fn: callable
2236
  @param post_add_fn: Callable for post-processing a newly created item after
2237
    it has been put into the container. It receives the index of the new item
2238
    and the new item as parameters.
2239

2240
  """
2241
  for (op, identifier, params, private) in mods:
2242
    changes = None
2243

    
2244
    if op == constants.DDM_ADD:
2245
      # Calculate where item will be added
2246
      # When adding an item, identifier can only be an index
2247
      try:
2248
        idx = int(identifier)
2249
      except ValueError:
2250
        raise errors.OpPrereqError("Only possitive integer or -1 is accepted as"
2251
                                   " identifier for %s" % constants.DDM_ADD,
2252
                                   errors.ECODE_INVAL)
2253
      if idx == -1:
2254
        addidx = len(container)
2255
      else:
2256
        if idx < 0:
2257
          raise IndexError("Not accepting negative indices other than -1")
2258
        elif idx > len(container):
2259
          raise IndexError("Got %s index %s, but there are only %s" %
2260
                           (kind, idx, len(container)))
2261
        addidx = idx
2262

    
2263
      if create_fn is None:
2264
        item = params
2265
      else:
2266
        (item, changes) = create_fn(addidx, params, private)
2267

    
2268
      if idx == -1:
2269
        container.append(item)
2270
      else:
2271
        assert idx >= 0
2272
        assert idx <= len(container)
2273
        # list.insert does so before the specified index
2274
        container.insert(idx, item)
2275

    
2276
      if post_add_fn is not None:
2277
        post_add_fn(addidx, item)
2278

    
2279
    else:
2280
      # Retrieve existing item
2281
      (absidx, item) = GetItemFromContainer(identifier, kind, container)
2282

    
2283
      if op == constants.DDM_REMOVE:
2284
        assert not params
2285

    
2286
        changes = [("%s/%s" % (kind, absidx), "remove")]
2287

    
2288
        if remove_fn is not None:
2289
          msg = remove_fn(absidx, item, private)
2290
          if msg:
2291
            changes.append(("%s/%s" % (kind, absidx), msg))
2292

    
2293
        assert container[absidx] == item
2294
        del container[absidx]
2295
      elif op == constants.DDM_MODIFY:
2296
        if modify_fn is not None:
2297
          changes = modify_fn(absidx, item, params, private)
2298
      else:
2299
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2300

    
2301
    assert _TApplyContModsCbChanges(changes)
2302

    
2303
    if not (chgdesc is None or changes is None):
2304
      chgdesc.extend(changes)
2305

    
2306

    
2307
def _UpdateIvNames(base_index, disks):
2308
  """Updates the C{iv_name} attribute of disks.
2309

2310
  @type disks: list of L{objects.Disk}
2311

2312
  """
2313
  for (idx, disk) in enumerate(disks):
2314
    disk.iv_name = "disk/%s" % (base_index + idx, )
2315

    
2316

    
2317
class LUInstanceSetParams(LogicalUnit):
2318
  """Modifies an instances's parameters.
2319

2320
  """
2321
  HPATH = "instance-modify"
2322
  HTYPE = constants.HTYPE_INSTANCE
2323
  REQ_BGL = False
2324

    
2325
  @staticmethod
2326
  def _UpgradeDiskNicMods(kind, mods, verify_fn):
2327
    assert ht.TList(mods)
2328
    assert not mods or len(mods[0]) in (2, 3)
2329

    
2330
    if mods and len(mods[0]) == 2:
2331
      result = []
2332

    
2333
      addremove = 0
2334
      for op, params in mods:
2335
        if op in (constants.DDM_ADD, constants.DDM_REMOVE):
2336
          result.append((op, -1, params))
2337
          addremove += 1
2338

    
2339
          if addremove > 1:
2340
            raise errors.OpPrereqError("Only one %s add or remove operation is"
2341
                                       " supported at a time" % kind,
2342
                                       errors.ECODE_INVAL)
2343
        else:
2344
          result.append((constants.DDM_MODIFY, op, params))
2345

    
2346
      assert verify_fn(result)
2347
    else:
2348
      result = mods
2349

    
2350
    return result
2351

    
2352
  @staticmethod
2353
  def _CheckMods(kind, mods, key_types, item_fn):
2354
    """Ensures requested disk/NIC modifications are valid.
2355

2356
    """
2357
    for (op, _, params) in mods:
2358
      assert ht.TDict(params)
2359

    
2360
      # If 'key_types' is an empty dict, we assume we have an
2361
      # 'ext' template and thus do not ForceDictType
2362
      if key_types:
2363
        utils.ForceDictType(params, key_types)
2364

    
2365
      if op == constants.DDM_REMOVE:
2366
        if params:
2367
          raise errors.OpPrereqError("No settings should be passed when"
2368
                                     " removing a %s" % kind,
2369
                                     errors.ECODE_INVAL)
2370
      elif op in (constants.DDM_ADD, constants.DDM_MODIFY):
2371
        item_fn(op, params)
2372
      else:
2373
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2374

    
2375
  def _VerifyDiskModification(self, op, params, excl_stor):
2376
    """Verifies a disk modification.
2377

2378
    """
2379
    if op == constants.DDM_ADD:
2380
      mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
2381
      if mode not in constants.DISK_ACCESS_SET:
2382
        raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
2383
                                   errors.ECODE_INVAL)
2384

    
2385
      size = params.get(constants.IDISK_SIZE, None)
2386
      if size is None:
2387
        raise errors.OpPrereqError("Required disk parameter '%s' missing" %
2388
                                   constants.IDISK_SIZE, errors.ECODE_INVAL)
2389
      size = int(size)
2390

    
2391
      params[constants.IDISK_SIZE] = size
2392
      name = params.get(constants.IDISK_NAME, None)
2393
      if name is not None and name.lower() == constants.VALUE_NONE:
2394
        params[constants.IDISK_NAME] = None
2395

    
2396
      CheckSpindlesExclusiveStorage(params, excl_stor, True)
2397

    
2398
    elif op == constants.DDM_MODIFY:
2399
      if constants.IDISK_SIZE in params:
2400
        raise errors.OpPrereqError("Disk size change not possible, use"
2401
                                   " grow-disk", errors.ECODE_INVAL)
2402

    
2403
      # Disk modification supports changing only the disk name and mode.
2404
      # Changing arbitrary parameters is allowed only for ext disk template",
2405
      if self.instance.disk_template != constants.DT_EXT:
2406
        utils.ForceDictType(params, constants.MODIFIABLE_IDISK_PARAMS_TYPES)
2407

    
2408
      name = params.get(constants.IDISK_NAME, None)
2409
      if name is not None and name.lower() == constants.VALUE_NONE:
2410
        params[constants.IDISK_NAME] = None
2411

    
2412
  @staticmethod
2413
  def _VerifyNicModification(op, params):
2414
    """Verifies a network interface modification.
2415

2416
    """
2417
    if op in (constants.DDM_ADD, constants.DDM_MODIFY):
2418
      ip = params.get(constants.INIC_IP, None)
2419
      name = params.get(constants.INIC_NAME, None)
2420
      req_net = params.get(constants.INIC_NETWORK, None)
2421
      link = params.get(constants.NIC_LINK, None)
2422
      mode = params.get(constants.NIC_MODE, None)
2423
      if name is not None and name.lower() == constants.VALUE_NONE:
2424
        params[constants.INIC_NAME] = None
2425
      if req_net is not None:
2426
        if req_net.lower() == constants.VALUE_NONE:
2427
          params[constants.INIC_NETWORK] = None
2428
          req_net = None
2429
        elif link is not None or mode is not None:
2430
          raise errors.OpPrereqError("If network is given"
2431
                                     " mode or link should not",
2432
                                     errors.ECODE_INVAL)
2433

    
2434
      if op == constants.DDM_ADD:
2435
        macaddr = params.get(constants.INIC_MAC, None)
2436
        if macaddr is None:
2437
          params[constants.INIC_MAC] = constants.VALUE_AUTO
2438

    
2439
      if ip is not None:
2440
        if ip.lower() == constants.VALUE_NONE:
2441
          params[constants.INIC_IP] = None
2442
        else:
2443
          if ip.lower() == constants.NIC_IP_POOL:
2444
            if op == constants.DDM_ADD and req_net is None:
2445
              raise errors.OpPrereqError("If ip=pool, parameter network"
2446
                                         " cannot be none",
2447
                                         errors.ECODE_INVAL)
2448
          else:
2449
            if not netutils.IPAddress.IsValid(ip):
2450
              raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
2451
                                         errors.ECODE_INVAL)
2452

    
2453
      if constants.INIC_MAC in params:
2454
        macaddr = params[constants.INIC_MAC]
2455
        if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2456
          macaddr = utils.NormalizeAndValidateMac(macaddr)
2457

    
2458
        if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
2459
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
2460
                                     " modifying an existing NIC",
2461
                                     errors.ECODE_INVAL)
2462

    
2463
  def CheckArguments(self):
2464
    if not (self.op.nics or self.op.disks or self.op.disk_template or
2465
            self.op.hvparams or self.op.beparams or self.op.os_name or
2466
            self.op.osparams or self.op.offline is not None or
2467
            self.op.runtime_mem or self.op.pnode):
2468
      raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
2469

    
2470
    if self.op.hvparams:
2471
      CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
2472
                           "hypervisor", "instance", "cluster")
2473

    
2474
    self.op.disks = self._UpgradeDiskNicMods(
2475
      "disk", self.op.disks, ht.TSetParamsMods(ht.TIDiskParams))
2476
    self.op.nics = self._UpgradeDiskNicMods(
2477
      "NIC", self.op.nics, ht.TSetParamsMods(ht.TINicParams))
2478

    
2479
    if self.op.disks and self.op.disk_template is not None:
2480
      raise errors.OpPrereqError("Disk template conversion and other disk"
2481
                                 " changes not supported at the same time",
2482
                                 errors.ECODE_INVAL)
2483

    
2484
    if (self.op.disk_template and
2485
        self.op.disk_template in constants.DTS_INT_MIRROR and
2486
        self.op.remote_node is None):
2487
      raise errors.OpPrereqError("Changing the disk template to a mirrored"
2488
                                 " one requires specifying a secondary node",
2489
                                 errors.ECODE_INVAL)
2490

    
2491
    # Check NIC modifications
2492
    self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
2493
                    self._VerifyNicModification)
2494

    
2495
    if self.op.pnode:
2496
      (self.op.pnode_uuid, self.op.pnode) = \
2497
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
2498

    
2499
  def ExpandNames(self):
2500
    self._ExpandAndLockInstance()
2501
    self.needed_locks[locking.LEVEL_NODEGROUP] = []
2502
    # Can't even acquire node locks in shared mode as upcoming changes in
2503
    # Ganeti 2.6 will start to modify the node object on disk conversion
2504
    self.needed_locks[locking.LEVEL_NODE] = []
2505
    self.needed_locks[locking.LEVEL_NODE_RES] = []
2506
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2507
    # Look node group to look up the ipolicy
2508
    self.share_locks[locking.LEVEL_NODEGROUP] = 1
2509

    
2510
  def DeclareLocks(self, level):
2511
    if level == locking.LEVEL_NODEGROUP:
2512
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
2513
      # Acquire locks for the instance's nodegroups optimistically. Needs
2514
      # to be verified in CheckPrereq
2515
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
2516
        self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
2517
    elif level == locking.LEVEL_NODE:
2518
      self._LockInstancesNodes()
2519
      if self.op.disk_template and self.op.remote_node:
2520
        (self.op.remote_node_uuid, self.op.remote_node) = \
2521
          ExpandNodeUuidAndName(self.cfg, self.op.remote_node_uuid,
2522
                                self.op.remote_node)
2523
        self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node_uuid)
2524
    elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
2525
      # Copy node locks
2526
      self.needed_locks[locking.LEVEL_NODE_RES] = \
2527
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
2528

    
2529
  def BuildHooksEnv(self):
2530
    """Build hooks env.
2531

2532
    This runs on the master, primary and secondaries.
2533

2534
    """
2535
    args = {}
2536
    if constants.BE_MINMEM in self.be_new:
2537
      args["minmem"] = self.be_new[constants.BE_MINMEM]
2538
    if constants.BE_MAXMEM in self.be_new:
2539
      args["maxmem"] = self.be_new[constants.BE_MAXMEM]
2540
    if constants.BE_VCPUS in self.be_new:
2541
      args["vcpus"] = self.be_new[constants.BE_VCPUS]
2542
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
2543
    # information at all.
2544

    
2545
    if self._new_nics is not None:
2546
      nics = []
2547

    
2548
      for nic in self._new_nics:
2549
        n = copy.deepcopy(nic)
2550
        nicparams = self.cluster.SimpleFillNIC(n.nicparams)
2551
        n.nicparams = nicparams
2552
        nics.append(NICToTuple(self, n))
2553

    
2554
      args["nics"] = nics
2555

    
2556
    env = BuildInstanceHookEnvByObject(self, self.instance, override=args)
2557
    if self.op.disk_template:
2558
      env["NEW_DISK_TEMPLATE"] = self.op.disk_template
2559
    if self.op.runtime_mem:
2560
      env["RUNTIME_MEMORY"] = self.op.runtime_mem
2561

    
2562
    return env
2563

    
2564
  def BuildHooksNodes(self):
2565
    """Build hooks nodes.
2566

2567
    """
2568
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
2569
    return (nl, nl)
2570

    
2571
  def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
2572
                              old_params, cluster, pnode_uuid):
2573

    
2574
    update_params_dict = dict([(key, params[key])
2575
                               for key in constants.NICS_PARAMETERS
2576
                               if key in params])
2577

    
2578
    req_link = update_params_dict.get(constants.NIC_LINK, None)
2579
    req_mode = update_params_dict.get(constants.NIC_MODE, None)
2580

    
2581
    new_net_uuid = None
2582
    new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid)
2583
    if new_net_uuid_or_name:
2584
      new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name)
2585
      new_net_obj = self.cfg.GetNetwork(new_net_uuid)
2586

    
2587
    if old_net_uuid:
2588
      old_net_obj = self.cfg.GetNetwork(old_net_uuid)
2589

    
2590
    if new_net_uuid:
2591
      netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode_uuid)
2592
      if not netparams:
2593
        raise errors.OpPrereqError("No netparams found for the network"
2594
                                   " %s, probably not connected" %
2595
                                   new_net_obj.name, errors.ECODE_INVAL)
2596
      new_params = dict(netparams)
2597
    else:
2598
      new_params = GetUpdatedParams(old_params, update_params_dict)
2599

    
2600
    utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)
2601

    
2602
    new_filled_params = cluster.SimpleFillNIC(new_params)
2603
    objects.NIC.CheckParameterSyntax(new_filled_params)
2604

    
2605
    new_mode = new_filled_params[constants.NIC_MODE]
2606
    if new_mode == constants.NIC_MODE_BRIDGED:
2607
      bridge = new_filled_params[constants.NIC_LINK]
2608
      msg = self.rpc.call_bridges_exist(pnode_uuid, [bridge]).fail_msg
2609
      if msg:
2610
        msg = "Error checking bridges on node '%s': %s" % \
2611
                (self.cfg.GetNodeName(pnode_uuid), msg)
2612
        if self.op.force:
2613
          self.warn.append(msg)
2614
        else:
2615
          raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
2616

    
2617
    elif new_mode == constants.NIC_MODE_ROUTED:
2618
      ip = params.get(constants.INIC_IP, old_ip)
2619
      if ip is None:
2620
        raise errors.OpPrereqError("Cannot set the NIC IP address to None"
2621
                                   " on a routed NIC", errors.ECODE_INVAL)
2622

    
2623
    elif new_mode == constants.NIC_MODE_OVS:
2624
      # TODO: check OVS link
2625
      self.LogInfo("OVS links are currently not checked for correctness")
2626

    
2627
    if constants.INIC_MAC in params:
2628
      mac = params[constants.INIC_MAC]
2629
      if mac is None:
2630
        raise errors.OpPrereqError("Cannot unset the NIC MAC address",
2631
                                   errors.ECODE_INVAL)
2632
      elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2633
        # otherwise generate the MAC address
2634
        params[constants.INIC_MAC] = \
2635
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
2636
      else:
2637
        # or validate/reserve the current one
2638
        try:
2639
          self.cfg.ReserveMAC(mac, self.proc.GetECId())
2640
        except errors.ReservationError:
2641
          raise errors.OpPrereqError("MAC address '%s' already in use"
2642
                                     " in cluster" % mac,
2643
                                     errors.ECODE_NOTUNIQUE)
2644
    elif new_net_uuid != old_net_uuid:
2645

    
2646
      def get_net_prefix(net_uuid):
2647
        mac_prefix = None
2648
        if net_uuid:
2649
          nobj = self.cfg.GetNetwork(net_uuid)
2650
          mac_prefix = nobj.mac_prefix
2651

    
2652
        return mac_prefix
2653

    
2654
      new_prefix = get_net_prefix(new_net_uuid)
2655
      old_prefix = get_net_prefix(old_net_uuid)
2656
      if old_prefix != new_prefix:
2657
        params[constants.INIC_MAC] = \
2658
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
2659

    
2660
    # if there is a change in (ip, network) tuple
2661
    new_ip = params.get(constants.INIC_IP, old_ip)
2662
    if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
2663
      if new_ip:
2664
        # if IP is pool then require a network and generate one IP
2665
        if new_ip.lower() == constants.NIC_IP_POOL:
2666
          if new_net_uuid:
2667
            try:
2668
              new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
2669
            except errors.ReservationError:
2670
              raise errors.OpPrereqError("Unable to get a free IP"
2671
                                         " from the address pool",
2672
                                         errors.ECODE_STATE)
2673
            self.LogInfo("Chose IP %s from network %s",
2674
                         new_ip,
2675
                         new_net_obj.name)
2676
            params[constants.INIC_IP] = new_ip
2677
          else:
2678
            raise errors.OpPrereqError("ip=pool, but no network found",
2679
                                       errors.ECODE_INVAL)
2680
        # Reserve new IP if in the new network if any
2681
        elif new_net_uuid:
2682
          try:
2683
            self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId(),
2684
                               check=self.op.conflicts_check)
2685
            self.LogInfo("Reserving IP %s in network %s",
2686
                         new_ip, new_net_obj.name)
2687
          except errors.ReservationError:
2688
            raise errors.OpPrereqError("IP %s not available in network %s" %
2689
                                       (new_ip, new_net_obj.name),
2690
                                       errors.ECODE_NOTUNIQUE)
2691
        # new network is None so check if new IP is a conflicting IP
2692
        elif self.op.conflicts_check:
2693
          _CheckForConflictingIp(self, new_ip, pnode_uuid)
2694

    
2695
      # release old IP if old network is not None
2696
      if old_ip and old_net_uuid:
2697
        try:
2698
          self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId())
2699
        except errors.AddressPoolError:
2700
          logging.warning("Release IP %s not contained in network %s",
2701
                          old_ip, old_net_obj.name)
2702

    
2703
    # there are no changes in (ip, network) tuple and old network is not None
2704
    elif (old_net_uuid is not None and
2705
          (req_link is not None or req_mode is not None)):
2706
      raise errors.OpPrereqError("Not allowed to change link or mode of"
2707
                                 " a NIC that is connected to a network",
2708
                                 errors.ECODE_INVAL)
2709

    
2710
    private.params = new_params
2711
    private.filled = new_filled_params
2712

    
2713
  def _PreCheckDiskTemplate(self, pnode_info):
2714
    """CheckPrereq checks related to a new disk template."""
2715
    # Arguments are passed to avoid configuration lookups
2716
    pnode_uuid = self.instance.primary_node
2717
    if self.instance.disk_template == self.op.disk_template:
2718
      raise errors.OpPrereqError("Instance already has disk template %s" %
2719
                                 self.instance.disk_template,
2720
                                 errors.ECODE_INVAL)
2721

    
2722
    if not self.cluster.IsDiskTemplateEnabled(self.op.disk_template):
2723
      raise errors.OpPrereqError("Disk template '%s' is not enabled for this"
2724
                                 " cluster." % self.op.disk_template)
2725

    
2726
    if (self.instance.disk_template,
2727
        self.op.disk_template) not in self._DISK_CONVERSIONS:
2728
      raise errors.OpPrereqError("Unsupported disk template conversion from"
2729
                                 " %s to %s" % (self.instance.disk_template,
2730
                                                self.op.disk_template),
2731
                                 errors.ECODE_INVAL)
2732
    CheckInstanceState(self, self.instance, INSTANCE_DOWN,
2733
                       msg="cannot change disk template")
2734
    if self.op.disk_template in constants.DTS_INT_MIRROR:
2735
      if self.op.remote_node_uuid == pnode_uuid:
2736
        raise errors.OpPrereqError("Given new secondary node %s is the same"
2737
                                   " as the primary node of the instance" %
2738
                                   self.op.remote_node, errors.ECODE_STATE)
2739
      CheckNodeOnline(self, self.op.remote_node_uuid)
2740
      CheckNodeNotDrained(self, self.op.remote_node_uuid)
2741
      # FIXME: here we assume that the old instance type is DT_PLAIN
2742
      assert self.instance.disk_template == constants.DT_PLAIN
2743
      disks = [{constants.IDISK_SIZE: d.size,
2744
                constants.IDISK_VG: d.logical_id[0]}
2745
               for d in self.instance.disks]
2746
      required = ComputeDiskSizePerVG(self.op.disk_template, disks)
2747
      CheckNodesFreeDiskPerVG(self, [self.op.remote_node_uuid], required)
2748

    
2749
      snode_info = self.cfg.GetNodeInfo(self.op.remote_node_uuid)
2750
      snode_group = self.cfg.GetNodeGroup(snode_info.group)
2751
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
2752
                                                              snode_group)
2753
      CheckTargetNodeIPolicy(self, ipolicy, self.instance, snode_info, self.cfg,
2754
                             ignore=self.op.ignore_ipolicy)
2755
      if pnode_info.group != snode_info.group:
2756
        self.LogWarning("The primary and secondary nodes are in two"
2757
                        " different node groups; the disk parameters"
2758
                        " from the first disk's node group will be"
2759
                        " used")
2760

    
2761
    if not self.op.disk_template in constants.DTS_EXCL_STORAGE:
2762
      # Make sure none of the nodes require exclusive storage
2763
      nodes = [pnode_info]
2764
      if self.op.disk_template in constants.DTS_INT_MIRROR:
2765
        assert snode_info
2766
        nodes.append(snode_info)
2767
      has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
2768
      if compat.any(map(has_es, nodes)):
2769
        errmsg = ("Cannot convert disk template from %s to %s when exclusive"
2770
                  " storage is enabled" % (self.instance.disk_template,
2771
                                           self.op.disk_template))
2772
        raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)
2773

    
2774
  def _PreCheckDisks(self, ispec):
2775
    """CheckPrereq checks related to disk changes.
2776

2777
    @type ispec: dict
2778
    @param ispec: instance specs to be updated with the new disks
2779

2780
    """
2781
    self.diskparams = self.cfg.GetInstanceDiskParams(self.instance)
2782

    
2783
    excl_stor = compat.any(
2784
      rpc.GetExclusiveStorageForNodes(self.cfg,
2785
                                      self.instance.all_nodes).values()
2786
      )
2787

    
2788
    # Check disk modifications. This is done here and not in CheckArguments
2789
    # (as with NICs), because we need to know the instance's disk template
2790
    ver_fn = lambda op, par: self._VerifyDiskModification(op, par, excl_stor)
2791
    if self.instance.disk_template == constants.DT_EXT:
2792
      self._CheckMods("disk", self.op.disks, {}, ver_fn)
2793
    else:
2794
      self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
2795
                      ver_fn)
2796

    
2797
    self.diskmod = _PrepareContainerMods(self.op.disks, None)
2798

    
2799
    # Check the validity of the `provider' parameter
2800
    if self.instance.disk_template in constants.DT_EXT:
2801
      for mod in self.diskmod:
2802
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
2803
        if mod[0] == constants.DDM_ADD:
2804
          if ext_provider is None:
2805
            raise errors.OpPrereqError("Instance template is '%s' and parameter"
2806
                                       " '%s' missing, during disk add" %
2807
                                       (constants.DT_EXT,
2808
                                        constants.IDISK_PROVIDER),
2809
                                       errors.ECODE_NOENT)
2810
        elif mod[0] == constants.DDM_MODIFY:
2811
          if ext_provider:
2812
            raise errors.OpPrereqError("Parameter '%s' is invalid during disk"
2813
                                       " modification" %
2814
                                       constants.IDISK_PROVIDER,
2815
                                       errors.ECODE_INVAL)
2816
    else:
2817
      for mod in self.diskmod:
2818
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
2819
        if ext_provider is not None:
2820
          raise errors.OpPrereqError("Parameter '%s' is only valid for"
2821
                                     " instances of type '%s'" %
2822
                                     (constants.IDISK_PROVIDER,
2823
                                      constants.DT_EXT),
2824
                                     errors.ECODE_INVAL)
2825

    
2826
    if not self.op.wait_for_sync and self.instance.disks_active:
2827
      for mod in self.diskmod:
2828
        if mod[0] == constants.DDM_ADD:
2829
          raise errors.OpPrereqError("Can't add a disk to an instance with"
2830
                                     " activated disks and"
2831
                                     " --no-wait-for-sync given.",
2832
                                     errors.ECODE_INVAL)
2833

    
2834
    if self.op.disks and self.instance.disk_template == constants.DT_DISKLESS:
2835
      raise errors.OpPrereqError("Disk operations not supported for"
2836
                                 " diskless instances", errors.ECODE_INVAL)
2837

    
2838
    def _PrepareDiskMod(_, disk, params, __):
2839
      disk.name = params.get(constants.IDISK_NAME, None)
2840

    
2841
    # Verify disk changes (operating on a copy)
2842
    disks = copy.deepcopy(self.instance.disks)
2843
    _ApplyContainerMods("disk", disks, None, self.diskmod, None,
2844
                        _PrepareDiskMod, None)
2845
    utils.ValidateDeviceNames("disk", disks)
2846
    if len(disks) > constants.MAX_DISKS:
2847
      raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
2848
                                 " more" % constants.MAX_DISKS,
2849
                                 errors.ECODE_STATE)
2850
    disk_sizes = [disk.size for disk in self.instance.disks]
2851
    disk_sizes.extend(params["size"] for (op, idx, params, private) in
2852
                      self.diskmod if op == constants.DDM_ADD)
2853
    ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
2854
    ispec[constants.ISPEC_DISK_SIZE] = disk_sizes
2855

    
2856
    if self.op.offline is not None and self.op.offline:
2857
      CheckInstanceState(self, self.instance, CAN_CHANGE_INSTANCE_OFFLINE,
2858
                         msg="can't change to offline")
2859

    
2860
  def CheckPrereq(self):
2861
    """Check prerequisites.
2862

2863
    This only checks the instance list against the existing names.
2864

2865
    """
2866
    assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
2867
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
2868
    self.cluster = self.cfg.GetClusterInfo()
2869
    cluster_hvparams = self.cluster.hvparams[self.instance.hypervisor]
2870

    
2871
    assert self.instance is not None, \
2872
      "Cannot retrieve locked instance %s" % self.op.instance_name
2873

    
2874
    pnode_uuid = self.instance.primary_node
2875

    
2876
    self.warn = []
2877

    
2878
    if (self.op.pnode_uuid is not None and self.op.pnode_uuid != pnode_uuid and
2879
        not self.op.force):
2880
      # verify that the instance is not up
2881
      instance_info = self.rpc.call_instance_info(
2882
          pnode_uuid, self.instance.name, self.instance.hypervisor,
2883
          cluster_hvparams)
2884
      if instance_info.fail_msg:
2885
        self.warn.append("Can't get instance runtime information: %s" %
2886
                         instance_info.fail_msg)
2887
      elif instance_info.payload:
2888
        raise errors.OpPrereqError("Instance is still running on %s" %
2889
                                   self.cfg.GetNodeName(pnode_uuid),
2890
                                   errors.ECODE_STATE)
2891

    
2892
    assert pnode_uuid in self.owned_locks(locking.LEVEL_NODE)
2893
    node_uuids = list(self.instance.all_nodes)
2894
    pnode_info = self.cfg.GetNodeInfo(pnode_uuid)
2895

    
2896
    #_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
2897
    assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
2898
    group_info = self.cfg.GetNodeGroup(pnode_info.group)
2899

    
2900
    # dictionary with instance information after the modification
2901
    ispec = {}
2902

    
2903
    if self.op.hotplug or self.op.hotplug_if_possible:
2904
      result = self.rpc.call_hotplug_supported(self.instance.primary_node,
2905
                                               self.instance)
2906
      if result.fail_msg:
2907
        if self.op.hotplug:
2908
          result.Raise("Hotplug is not possible: %s" % result.fail_msg,
2909
                       prereq=True)
2910
        else:
2911
          self.LogWarning(result.fail_msg)
2912
          self.op.hotplug = False
2913
          self.LogInfo("Modification will take place without hotplugging.")
2914
      else:
2915
        self.op.hotplug = True
2916

    
2917
    # Prepare NIC modifications
2918
    self.nicmod = _PrepareContainerMods(self.op.nics, _InstNicModPrivate)
2919

    
2920
    # OS change
2921
    if self.op.os_name and not self.op.force:
2922
      CheckNodeHasOS(self, self.instance.primary_node, self.op.os_name,
2923
                     self.op.force_variant)
2924
      instance_os = self.op.os_name
2925
    else:
2926
      instance_os = self.instance.os
2927

    
2928
    assert not (self.op.disk_template and self.op.disks), \
2929
      "Can't modify disk template and apply disk changes at the same time"
2930

    
2931
    if self.op.disk_template:
2932
      self._PreCheckDiskTemplate(pnode_info)
2933

    
2934
    self._PreCheckDisks(ispec)
2935

    
2936
    # hvparams processing
2937
    if self.op.hvparams:
2938
      hv_type = self.instance.hypervisor
2939
      i_hvdict = GetUpdatedParams(self.instance.hvparams, self.op.hvparams)
2940
      utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
2941
      hv_new = self.cluster.SimpleFillHV(hv_type, self.instance.os, i_hvdict)
2942

    
2943
      # local check
2944
      hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
2945
      CheckHVParams(self, node_uuids, self.instance.hypervisor, hv_new)
2946
      self.hv_proposed = self.hv_new = hv_new # the new actual values
2947
      self.hv_inst = i_hvdict # the new dict (without defaults)
2948
    else:
2949
      self.hv_proposed = self.cluster.SimpleFillHV(self.instance.hypervisor,
2950
                                                   self.instance.os,
2951
                                                   self.instance.hvparams)
2952
      self.hv_new = self.hv_inst = {}
2953

    
2954
    # beparams processing
2955
    if self.op.beparams:
2956
      i_bedict = GetUpdatedParams(self.instance.beparams, self.op.beparams,
2957
                                  use_none=True)
2958
      objects.UpgradeBeParams(i_bedict)
2959
      utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
2960
      be_new = self.cluster.SimpleFillBE(i_bedict)
2961
      self.be_proposed = self.be_new = be_new # the new actual values
2962
      self.be_inst = i_bedict # the new dict (without defaults)
2963
    else:
2964
      self.be_new = self.be_inst = {}
2965
      self.be_proposed = self.cluster.SimpleFillBE(self.instance.beparams)
2966
    be_old = self.cluster.FillBE(self.instance)
2967

    
2968
    # CPU param validation -- checking every time a parameter is
2969
    # changed to cover all cases where either CPU mask or vcpus have
2970
    # changed
2971
    if (constants.BE_VCPUS in self.be_proposed and
2972
        constants.HV_CPU_MASK in self.hv_proposed):
2973
      cpu_list = \
2974
        utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
2975
      # Verify mask is consistent with number of vCPUs. Can skip this
2976
      # test if only 1 entry in the CPU mask, which means same mask
2977
      # is applied to all vCPUs.
2978
      if (len(cpu_list) > 1 and
2979
          len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
2980
        raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
2981
                                   " CPU mask [%s]" %
2982
                                   (self.be_proposed[constants.BE_VCPUS],
2983
                                    self.hv_proposed[constants.HV_CPU_MASK]),
2984
                                   errors.ECODE_INVAL)
2985

    
2986
      # Only perform this test if a new CPU mask is given
2987
      if constants.HV_CPU_MASK in self.hv_new:
2988
        # Calculate the largest CPU number requested
2989
        max_requested_cpu = max(map(max, cpu_list))
2990
        # Check that all of the instance's nodes have enough physical CPUs to
2991
        # satisfy the requested CPU mask
2992
        hvspecs = [(self.instance.hypervisor,
2993
                    self.cfg.GetClusterInfo()
2994
                      .hvparams[self.instance.hypervisor])]
2995
        _CheckNodesPhysicalCPUs(self, self.instance.all_nodes,
2996
                                max_requested_cpu + 1,
2997
                                hvspecs)
2998

    
2999
    # osparams processing
3000
    if self.op.osparams:
3001
      i_osdict = GetUpdatedParams(self.instance.osparams, self.op.osparams)
3002
      CheckOSParams(self, True, node_uuids, instance_os, i_osdict)
3003
      self.os_inst = i_osdict # the new dict (without defaults)
3004
    else:
3005
      self.os_inst = {}
3006

    
3007
    #TODO(dynmem): do the appropriate check involving MINMEM
3008
    if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
3009
        be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
3010
      mem_check_list = [pnode_uuid]
3011
      if be_new[constants.BE_AUTO_BALANCE]:
3012
        # either we changed auto_balance to yes or it was from before
3013
        mem_check_list.extend(self.instance.secondary_nodes)
3014
      instance_info = self.rpc.call_instance_info(
3015
          pnode_uuid, self.instance.name, self.instance.hypervisor,
3016
          cluster_hvparams)
3017
      hvspecs = [(self.instance.hypervisor,
3018
                  cluster_hvparams)]
3019
      nodeinfo = self.rpc.call_node_info(mem_check_list, None,
3020
                                         hvspecs)
3021
      pninfo = nodeinfo[pnode_uuid]
3022
      msg = pninfo.fail_msg
3023
      if msg:
3024
        # Assume the primary node is unreachable and go ahead
3025
        self.warn.append("Can't get info from primary node %s: %s" %
3026
                         (self.cfg.GetNodeName(pnode_uuid), msg))
3027
      else:
3028
        (_, _, (pnhvinfo, )) = pninfo.payload
3029
        if not isinstance(pnhvinfo.get("memory_free", None), int):
3030
          self.warn.append("Node data from primary node %s doesn't contain"
3031
                           " free memory information" %
3032
                           self.cfg.GetNodeName(pnode_uuid))
3033
        elif instance_info.fail_msg:
3034
          self.warn.append("Can't get instance runtime information: %s" %
3035
                           instance_info.fail_msg)
3036
        else:
3037
          if instance_info.payload:
3038
            current_mem = int(instance_info.payload["memory"])
3039
          else:
3040
            # Assume instance not running
3041
            # (there is a slight race condition here, but it's not very
3042
            # probable, and we have no other way to check)
3043
            # TODO: Describe race condition
3044
            current_mem = 0
3045
          #TODO(dynmem): do the appropriate check involving MINMEM
3046
          miss_mem = (be_new[constants.BE_MAXMEM] - current_mem -
3047
                      pnhvinfo["memory_free"])
3048
          if miss_mem > 0:
3049
            raise errors.OpPrereqError("This change will prevent the instance"
3050
                                       " from starting, due to %d MB of memory"
3051
                                       " missing on its primary node" %
3052
                                       miss_mem, errors.ECODE_NORES)
3053

    
3054
      if be_new[constants.BE_AUTO_BALANCE]:
3055
        for node_uuid, nres in nodeinfo.items():
3056
          if node_uuid not in self.instance.secondary_nodes:
3057
            continue
3058
          nres.Raise("Can't get info from secondary node %s" %
3059
                     self.cfg.GetNodeName(node_uuid), prereq=True,
3060
                     ecode=errors.ECODE_STATE)
3061
          (_, _, (nhvinfo, )) = nres.payload
3062
          if not isinstance(nhvinfo.get("memory_free", None), int):
3063
            raise errors.OpPrereqError("Secondary node %s didn't return free"
3064
                                       " memory information" %
3065
                                       self.cfg.GetNodeName(node_uuid),
3066
                                       errors.ECODE_STATE)
3067
          #TODO(dynmem): do the appropriate check involving MINMEM
3068
          elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
3069
            raise errors.OpPrereqError("This change will prevent the instance"
3070
                                       " from failover to its secondary node"
3071
                                       " %s, due to not enough memory" %
3072
                                       self.cfg.GetNodeName(node_uuid),
3073
                                       errors.ECODE_STATE)
3074

    
3075
    if self.op.runtime_mem:
3076
      remote_info = self.rpc.call_instance_info(
3077
         self.instance.primary_node, self.instance.name,
3078
         self.instance.hypervisor,
3079
         cluster_hvparams)
3080
      remote_info.Raise("Error checking node %s" %
3081
                        self.cfg.GetNodeName(self.instance.primary_node))
3082
      if not remote_info.payload: # not running already
3083
        raise errors.OpPrereqError("Instance %s is not running" %
3084
                                   self.instance.name, errors.ECODE_STATE)
3085

    
3086
      current_memory = remote_info.payload["memory"]
3087
      if (not self.op.force and
3088
           (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
3089
            self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
3090
        raise errors.OpPrereqError("Instance %s must have memory between %d"
3091
                                   " and %d MB of memory unless --force is"
3092
                                   " given" %
3093
                                   (self.instance.name,
3094
                                    self.be_proposed[constants.BE_MINMEM],
3095
                                    self.be_proposed[constants.BE_MAXMEM]),
3096
                                   errors.ECODE_INVAL)
3097

    
3098
      delta = self.op.runtime_mem - current_memory
3099
      if delta > 0:
3100
        CheckNodeFreeMemory(
3101
            self, self.instance.primary_node,
3102
            "ballooning memory for instance %s" % self.instance.name, delta,
3103
            self.instance.hypervisor,
3104
            self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])
3105

    
3106
    # make self.cluster visible in the functions below
3107
    cluster = self.cluster
3108

    
3109
    def _PrepareNicCreate(_, params, private):
3110
      self._PrepareNicModification(params, private, None, None,
3111
                                   {}, cluster, pnode_uuid)
3112
      return (None, None)
3113

    
3114
    def _PrepareNicMod(_, nic, params, private):
3115
      self._PrepareNicModification(params, private, nic.ip, nic.network,
3116
                                   nic.nicparams, cluster, pnode_uuid)
3117
      return None
3118

    
3119
    def _PrepareNicRemove(_, params, __):
3120
      ip = params.ip
3121
      net = params.network
3122
      if net is not None and ip is not None:
3123
        self.cfg.ReleaseIp(net, ip, self.proc.GetECId())
3124

    
3125
    # Verify NIC changes (operating on copy)
3126
    nics = self.instance.nics[:]
3127
    _ApplyContainerMods("NIC", nics, None, self.nicmod,
3128
                        _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
3129
    if len(nics) > constants.MAX_NICS:
3130
      raise errors.OpPrereqError("Instance has too many network interfaces"
3131
                                 " (%d), cannot add more" % constants.MAX_NICS,
3132
                                 errors.ECODE_STATE)
3133

    
3134
    # Pre-compute NIC changes (necessary to use result in hooks)
3135
    self._nic_chgdesc = []
3136
    if self.nicmod:
3137
      # Operate on copies as this is still in prereq
3138
      nics = [nic.Copy() for nic in self.instance.nics]
3139
      _ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
3140
                          self._CreateNewNic, self._ApplyNicMods,
3141
                          self._RemoveNic)
3142
      # Verify that NIC names are unique and valid
3143
      utils.ValidateDeviceNames("NIC", nics)
3144
      self._new_nics = nics
3145
      ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
3146
    else:
3147
      self._new_nics = None
3148
      ispec[constants.ISPEC_NIC_COUNT] = len(self.instance.nics)
3149

    
3150
    if not self.op.ignore_ipolicy:
3151
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
3152
                                                              group_info)
3153

    
3154
      # Fill ispec with backend parameters
3155
      ispec[constants.ISPEC_SPINDLE_USE] = \
3156
        self.be_new.get(constants.BE_SPINDLE_USE, None)
3157
      ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS,
3158
                                                         None)
3159

    
3160
      # Copy ispec to verify parameters with min/max values separately
3161
      if self.op.disk_template:
3162
        new_disk_template = self.op.disk_template
3163
      else:
3164
        new_disk_template = self.instance.disk_template
3165
      ispec_max = ispec.copy()
3166
      ispec_max[constants.ISPEC_MEM_SIZE] = \
3167
        self.be_new.get(constants.BE_MAXMEM, None)
3168
      res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,
3169
                                                     new_disk_template)
3170
      ispec_min = ispec.copy()
3171
      ispec_min[constants.ISPEC_MEM_SIZE] = \
3172
        self.be_new.get(constants.BE_MINMEM, None)
3173
      res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,
3174
                                                     new_disk_template)
3175

    
3176
      if (res_max or res_min):
3177
        # FIXME: Improve error message by including information about whether
3178
        # the upper or lower limit of the parameter fails the ipolicy.
3179
        msg = ("Instance allocation to group %s (%s) violates policy: %s" %
3180
               (group_info, group_info.name,
3181
                utils.CommaJoin(set(res_max + res_min))))
3182
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
3183

    
3184
  def _ConvertPlainToDrbd(self, feedback_fn):
3185
    """Converts an instance from plain to drbd.
3186

3187
    """
3188
    feedback_fn("Converting template to drbd")
3189
    pnode_uuid = self.instance.primary_node
3190
    snode_uuid = self.op.remote_node_uuid
3191

    
3192
    assert self.instance.disk_template == constants.DT_PLAIN
3193

    
3194
    # create a fake disk info for _GenerateDiskTemplate
3195
    disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
3196
                  constants.IDISK_VG: d.logical_id[0],
3197
                  constants.IDISK_NAME: d.name}
3198
                 for d in self.instance.disks]
3199
    new_disks = GenerateDiskTemplate(self, self.op.disk_template,
3200
                                     self.instance.uuid, pnode_uuid,
3201
                                     [snode_uuid], disk_info, None, None, 0,
3202
                                     feedback_fn, self.diskparams)
3203
    anno_disks = rpc.AnnotateDiskParams(new_disks, self.diskparams)
3204
    p_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, pnode_uuid)
3205
    s_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, snode_uuid)
3206
    info = GetInstanceInfoText(self.instance)
3207
    feedback_fn("Creating additional volumes...")
3208
    # first, create the missing data and meta devices
3209
    for disk in anno_disks:
3210
      # unfortunately this is... not too nice
3211
      CreateSingleBlockDev(self, pnode_uuid, self.instance, disk.children[1],
3212
                           info, True, p_excl_stor)
3213
      for child in disk.children:
3214
        CreateSingleBlockDev(self, snode_uuid, self.instance, child, info, True,
3215
                             s_excl_stor)
3216
    # at this stage, all new LVs have been created, we can rename the
3217
    # old ones
3218
    feedback_fn("Renaming original volumes...")
3219
    rename_list = [(o, n.children[0].logical_id)
3220
                   for (o, n) in zip(self.instance.disks, new_disks)]
3221
    result = self.rpc.call_blockdev_rename(pnode_uuid, rename_list)
3222
    result.Raise("Failed to rename original LVs")
3223

    
3224
    feedback_fn("Initializing DRBD devices...")
3225
    # all child devices are in place, we can now create the DRBD devices
3226
    try:
3227
      for disk in anno_disks:
3228
        for (node_uuid, excl_stor) in [(pnode_uuid, p_excl_stor),
3229
                                       (snode_uuid, s_excl_stor)]:
3230
          f_create = node_uuid == pnode_uuid
3231
          CreateSingleBlockDev(self, node_uuid, self.instance, disk, info,
3232
                               f_create, excl_stor)
3233
    except errors.GenericError, e:
3234
      feedback_fn("Initializing of DRBD devices failed;"
3235
                  " renaming back original volumes...")
3236
      rename_back_list = [(n.children[0], o.logical_id)
3237
                          for (n, o) in zip(new_disks, self.instance.disks)]
3238
      result = self.rpc.call_blockdev_rename(pnode_uuid, rename_back_list)
3239
      result.Raise("Failed to rename LVs back after error %s" % str(e))
3240
      raise
3241

    
3242
    # at this point, the instance has been modified
3243
    self.instance.disk_template = constants.DT_DRBD8
3244
    self.instance.disks = new_disks
3245
    self.cfg.Update(self.instance, feedback_fn)
3246

    
3247
    # Release node locks while waiting for sync
3248
    ReleaseLocks(self, locking.LEVEL_NODE)
3249

    
3250
    # disks are created, waiting for sync
3251
    disk_abort = not WaitForSync(self, self.instance,
3252
                                 oneshot=not self.op.wait_for_sync)
3253
    if disk_abort:
3254
      raise errors.OpExecError("There are some degraded disks for"
3255
                               " this instance, please cleanup manually")
3256

    
3257
    # Node resource locks will be released by caller
3258

    
3259
  def _ConvertDrbdToPlain(self, feedback_fn):
3260
    """Converts an instance from drbd to plain.
3261

3262
    """
3263
    assert len(self.instance.secondary_nodes) == 1
3264
    assert self.instance.disk_template == constants.DT_DRBD8
3265

    
3266
    pnode_uuid = self.instance.primary_node
3267
    snode_uuid = self.instance.secondary_nodes[0]
3268
    feedback_fn("Converting template to plain")
3269

    
3270
    old_disks = AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
3271
    new_disks = [d.children[0] for d in self.instance.disks]
3272

    
3273
    # copy over size, mode and name
3274
    for parent, child in zip(old_disks, new_disks):
3275
      child.size = parent.size
3276
      child.mode = parent.mode
3277
      child.name = parent.name
3278

    
3279
    # this is a DRBD disk, return its port to the pool
3280
    # NOTE: this must be done right before the call to cfg.Update!
3281
    for disk in old_disks:
3282
      tcp_port = disk.logical_id[2]
3283
      self.cfg.AddTcpUdpPort(tcp_port)
3284

    
3285
    # update instance structure
3286
    self.instance.disks = new_disks
3287
    self.instance.disk_template = constants.DT_PLAIN
3288
    _UpdateIvNames(0, self.instance.disks)
3289
    self.cfg.Update(self.instance, feedback_fn)
3290

    
3291
    # Release locks in case removing disks takes a while
3292
    ReleaseLocks(self, locking.LEVEL_NODE)
3293

    
3294
    feedback_fn("Removing volumes on the secondary node...")
3295
    for disk in old_disks:
3296
      result = self.rpc.call_blockdev_remove(snode_uuid, (disk, self.instance))
3297
      result.Warn("Could not remove block device %s on node %s,"
3298
                  " continuing anyway" %
3299
                  (disk.iv_name, self.cfg.GetNodeName(snode_uuid)),
3300
                  self.LogWarning)
3301

    
3302
    feedback_fn("Removing unneeded volumes on the primary node...")
3303
    for idx, disk in enumerate(old_disks):
3304
      meta = disk.children[1]
3305
      result = self.rpc.call_blockdev_remove(pnode_uuid, (meta, self.instance))
3306
      result.Warn("Could not remove metadata for disk %d on node %s,"
3307
                  " continuing anyway" %
3308
                  (idx, self.cfg.GetNodeName(pnode_uuid)),
3309
                  self.LogWarning)
3310

    
3311
  def _HotplugDevice(self, action, dev_type, device, extra, seq):
3312
    self.LogInfo("Trying to hotplug device...")
3313
    msg = "hotplug:"
3314
    result = self.rpc.call_hotplug_device(self.instance.primary_node,
3315
                                          self.instance, action, dev_type,
3316
                                          (device, self.instance),
3317
                                          extra, seq)
3318
    if result.fail_msg:
3319
      self.LogWarning("Could not hotplug device: %s" % result.fail_msg)
3320
      self.LogInfo("Continuing execution..")
3321
      msg += "failed"
3322
    else:
3323
      self.LogInfo("Hotplug done.")
3324
      msg += "done"
3325
    return msg
3326

    
3327
  def _CreateNewDisk(self, idx, params, _):
3328
    """Creates a new disk.
3329

3330
    """
3331
    # add a new disk
3332
    if self.instance.disk_template in constants.DTS_FILEBASED:
3333
      (file_driver, file_path) = self.instance.disks[0].logical_id
3334
      file_path = os.path.dirname(file_path)
3335
    else:
3336
      file_driver = file_path = None
3337

    
3338
    disk = \
3339
      GenerateDiskTemplate(self, self.instance.disk_template,
3340
                           self.instance.uuid, self.instance.primary_node,
3341
                           self.instance.secondary_nodes, [params], file_path,
3342
                           file_driver, idx, self.Log, self.diskparams)[0]
3343

    
3344
    new_disks = CreateDisks(self, self.instance, disks=[disk])
3345

    
3346
    if self.cluster.prealloc_wipe_disks:
3347
      # Wipe new disk
3348
      WipeOrCleanupDisks(self, self.instance,
3349
                         disks=[(idx, disk, 0)],
3350
                         cleanup=new_disks)
3351

    
3352
    changes = [
3353
      ("disk/%d" % idx,
3354
       "add:size=%s,mode=%s" % (disk.size, disk.mode)),
3355
      ]
3356
    if self.op.hotplug:
3357
      result = self.rpc.call_blockdev_assemble(self.instance.primary_node,
3358
                                               (disk, self.instance),
3359
                                               self.instance.name, True, idx)
3360
      if result.fail_msg:
3361
        changes.append(("disk/%d" % idx, "assemble:failed"))
3362
        self.LogWarning("Can't assemble newly created disk %d: %s",
3363
                        idx, result.fail_msg)
3364
      else:
3365
        _, link_name = result.payload
3366
        msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
3367
                                  constants.HOTPLUG_TARGET_DISK,
3368
                                  disk, link_name, idx)
3369
        changes.append(("disk/%d" % idx, msg))
3370

    
3371
    return (disk, changes)
3372

    
3373
  def _PostAddDisk(self, _, disk):
3374
    if not WaitForSync(self, self.instance, disks=[disk],
3375
                       oneshot=not self.op.wait_for_sync):
3376
      raise errors.OpExecError("Failed to sync disks of %s" %
3377
                               self.instance.name)
3378

    
3379
    # the disk is active at this point, so deactivate it if the instance disks
3380
    # are supposed to be inactive
3381
    if not self.instance.disks_active:
3382
      ShutdownInstanceDisks(self, self.instance, disks=[disk])
3383

    
3384
  def _ModifyDisk(self, idx, disk, params, _):
3385
    """Modifies a disk.
3386

3387
    """
3388
    changes = []
3389
    if constants.IDISK_MODE in params:
3390
      disk.mode = params.get(constants.IDISK_MODE)
3391
      changes.append(("disk.mode/%d" % idx, disk.mode))
3392

    
3393
    if constants.IDISK_NAME in params:
3394
      disk.name = params.get(constants.IDISK_NAME)
3395
      changes.append(("disk.name/%d" % idx, disk.name))
3396

    
3397
    # Modify arbitrary params in case instance template is ext
3398
    for key, value in params.iteritems():
3399
      if (key not in constants.MODIFIABLE_IDISK_PARAMS and
3400
          self.instance.disk_template == constants.DT_EXT):
3401
        # stolen from GetUpdatedParams: default means reset/delete
3402
        if value.lower() == constants.VALUE_DEFAULT:
3403
          try:
3404
            del disk.params[key]
3405
          except KeyError:
3406
            pass
3407
        else:
3408
          disk.params[key] = value
3409
        changes.append(("disk.params:%s/%d" % (key, idx), value))
3410

    
3411
    return changes
3412

    
3413
  def _RemoveDisk(self, idx, root, _):
3414
    """Removes a disk.
3415

3416
    """
3417
    hotmsg = ""
3418
    if self.op.hotplug:
3419
      hotmsg = self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
3420
                                   constants.HOTPLUG_TARGET_DISK,
3421
                                   root, None, idx)
3422
      ShutdownInstanceDisks(self, self.instance, [root])
3423

    
3424
    (anno_disk,) = AnnotateDiskParams(self.instance, [root], self.cfg)
3425
    for node_uuid, disk in anno_disk.ComputeNodeTree(
3426
                             self.instance.primary_node):
3427
      msg = self.rpc.call_blockdev_remove(node_uuid, (disk, self.instance)) \
3428
              .fail_msg
3429
      if msg:
3430
        self.LogWarning("Could not remove disk/%d on node '%s': %s,"
3431
                        " continuing anyway", idx,
3432
                        self.cfg.GetNodeName(node_uuid), msg)
3433

    
3434
    # if this is a DRBD disk, return its port to the pool
3435
    if root.dev_type in constants.DTS_DRBD:
3436
      self.cfg.AddTcpUdpPort(root.logical_id[2])
3437

    
3438
    return hotmsg
3439

    
3440
  def _CreateNewNic(self, idx, params, private):
3441
    """Creates data structure for a new network interface.
3442

3443
    """
3444
    mac = params[constants.INIC_MAC]
3445
    ip = params.get(constants.INIC_IP, None)
3446
    net = params.get(constants.INIC_NETWORK, None)
3447
    name = params.get(constants.INIC_NAME, None)
3448
    net_uuid = self.cfg.LookupNetwork(net)
3449
    #TODO: not private.filled?? can a nic have no nicparams??
3450
    nicparams = private.filled
3451
    nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name,
3452
                       nicparams=nicparams)
3453
    nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
3454

    
3455
    changes = [
3456
      ("nic.%d" % idx,
3457
       "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
3458
       (mac, ip, private.filled[constants.NIC_MODE],
3459
       private.filled[constants.NIC_LINK], net)),
3460
      ]
3461

    
3462
    if self.op.hotplug:
3463
      msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
3464
                                constants.HOTPLUG_TARGET_NIC,
3465
                                nobj, None, idx)
3466
      changes.append(("nic.%d" % idx, msg))
3467

    
3468
    return (nobj, changes)
3469

    
3470
  def _ApplyNicMods(self, idx, nic, params, private):
3471
    """Modifies a network interface.
3472

3473
    """
3474
    changes = []
3475

    
3476
    for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:
3477
      if key in params:
3478
        changes.append(("nic.%s/%d" % (key, idx), params[key]))
3479
        setattr(nic, key, params[key])
3480

    
3481
    new_net = params.get(constants.INIC_NETWORK, nic.network)
3482
    new_net_uuid = self.cfg.LookupNetwork(new_net)
3483
    if new_net_uuid != nic.network:
3484
      changes.append(("nic.network/%d" % idx, new_net))
3485
      nic.network = new_net_uuid
3486

    
3487
    if private.filled:
3488
      nic.nicparams = private.filled
3489

    
3490
      for (key, val) in nic.nicparams.items():
3491
        changes.append(("nic.%s/%d" % (key, idx), val))
3492

    
3493
    if self.op.hotplug:
3494
      msg = self._HotplugDevice(constants.HOTPLUG_ACTION_MODIFY,
3495
                                constants.HOTPLUG_TARGET_NIC,
3496
                                nic, None, idx)
3497
      changes.append(("nic/%d" % idx, msg))
3498

    
3499
    return changes
3500

    
3501
  def _RemoveNic(self, idx, nic, _):
3502
    if self.op.hotplug:
3503
      return self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
3504
                                 constants.HOTPLUG_TARGET_NIC,
3505
                                 nic, None, idx)
3506

    
3507
  def Exec(self, feedback_fn):
3508
    """Modifies an instance.
3509

3510
    All parameters take effect only at the next restart of the instance.
3511

3512
    """
3513
    # Process here the warnings from CheckPrereq, as we don't have a
3514
    # feedback_fn there.
3515
    # TODO: Replace with self.LogWarning
3516
    for warn in self.warn:
3517
      feedback_fn("WARNING: %s" % warn)
3518

    
3519
    assert ((self.op.disk_template is None) ^
3520
            bool(self.owned_locks(locking.LEVEL_NODE_RES))), \
3521
      "Not owning any node resource locks"
3522

    
3523
    result = []
3524

    
3525
    # New primary node
3526
    if self.op.pnode_uuid:
3527
      self.instance.primary_node = self.op.pnode_uuid
3528

    
3529
    # runtime memory
3530
    if self.op.runtime_mem:
3531
      rpcres = self.rpc.call_instance_balloon_memory(self.instance.primary_node,
3532
                                                     self.instance,
3533
                                                     self.op.runtime_mem)
3534
      rpcres.Raise("Cannot modify instance runtime memory")
3535
      result.append(("runtime_memory", self.op.runtime_mem))
3536

    
3537
    # Apply disk changes
3538
    _ApplyContainerMods("disk", self.instance.disks, result, self.diskmod,
3539
                        self._CreateNewDisk, self._ModifyDisk,
3540
                        self._RemoveDisk, post_add_fn=self._PostAddDisk)
3541
    _UpdateIvNames(0, self.instance.disks)
3542

    
3543
    if self.op.disk_template:
3544
      if __debug__:
3545
        check_nodes = set(self.instance.all_nodes)
3546
        if self.op.remote_node_uuid:
3547
          check_nodes.add(self.op.remote_node_uuid)
3548
        for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
3549
          owned = self.owned_locks(level)
3550
          assert not (check_nodes - owned), \
3551
            ("Not owning the correct locks, owning %r, expected at least %r" %
3552
             (owned, check_nodes))
3553

    
3554
      r_shut = ShutdownInstanceDisks(self, self.instance)
3555
      if not r_shut:
3556
        raise errors.OpExecError("Cannot shutdown instance disks, unable to"
3557
                                 " proceed with disk template conversion")
3558
      mode = (self.instance.disk_template, self.op.disk_template)
3559
      try:
3560
        self._DISK_CONVERSIONS[mode](self, feedback_fn)
3561
      except:
3562
        self.cfg.ReleaseDRBDMinors(self.instance.uuid)
3563
        raise
3564
      result.append(("disk_template", self.op.disk_template))
3565

    
3566
      assert self.instance.disk_template == self.op.disk_template, \
3567
        ("Expected disk template '%s', found '%s'" %
3568
         (self.op.disk_template, self.instance.disk_template))
3569

    
3570
    # Release node and resource locks if there are any (they might already have
3571
    # been released during disk conversion)
3572
    ReleaseLocks(self, locking.LEVEL_NODE)
3573
    ReleaseLocks(self, locking.LEVEL_NODE_RES)
3574

    
3575
    # Apply NIC changes
3576
    if self._new_nics is not None:
3577
      self.instance.nics = self._new_nics
3578
      result.extend(self._nic_chgdesc)
3579

    
3580
    # hvparams changes
3581
    if self.op.hvparams:
3582
      self.instance.hvparams = self.hv_inst
3583
      for key, val in self.op.hvparams.iteritems():
3584
        result.append(("hv/%s" % key, val))
3585

    
3586
    # beparams changes
3587
    if self.op.beparams:
3588
      self.instance.beparams = self.be_inst
3589
      for key, val in self.op.beparams.iteritems():
3590
        result.append(("be/%s" % key, val))
3591

    
3592
    # OS change
3593
    if self.op.os_name:
3594
      self.instance.os = self.op.os_name
3595

    
3596
    # osparams changes
3597
    if self.op.osparams:
3598
      self.instance.osparams = self.os_inst
3599
      for key, val in self.op.osparams.iteritems():
3600
        result.append(("os/%s" % key, val))
3601

    
3602
    if self.op.offline is None:
3603
      # Ignore
3604
      pass
3605
    elif self.op.offline:
3606
      # Mark instance as offline
3607
      self.cfg.MarkInstanceOffline(self.instance.uuid)
3608
      result.append(("admin_state", constants.ADMINST_OFFLINE))
3609
    else:
3610
      # Mark instance as online, but stopped
3611
      self.cfg.MarkInstanceDown(self.instance.uuid)
3612
      result.append(("admin_state", constants.ADMINST_DOWN))
3613

    
3614
    self.cfg.Update(self.instance, feedback_fn, self.proc.GetECId())
3615

    
3616
    assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
3617
                self.owned_locks(locking.LEVEL_NODE)), \
3618
      "All node locks should have been released by now"
3619

    
3620
    return result
3621

    
3622
  _DISK_CONVERSIONS = {
3623
    (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
3624
    (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
3625
    }
3626

    
3627

    
3628
class LUInstanceChangeGroup(LogicalUnit):
3629
  HPATH = "instance-change-group"
3630
  HTYPE = constants.HTYPE_INSTANCE
3631
  REQ_BGL = False
3632

    
3633
  def ExpandNames(self):
3634
    self.share_locks = ShareAll()
3635

    
3636
    self.needed_locks = {
3637
      locking.LEVEL_NODEGROUP: [],
3638
      locking.LEVEL_NODE: [],
3639
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
3640
      }
3641

    
3642
    self._ExpandAndLockInstance()
3643

    
3644
    if self.op.target_groups:
3645
      self.req_target_uuids = map(self.cfg.LookupNodeGroup,
3646
                                  self.op.target_groups)
3647
    else:
3648
      self.req_target_uuids = None
3649

    
3650
    self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)
3651

    
3652
  def DeclareLocks(self, level):
3653
    if level == locking.LEVEL_NODEGROUP:
3654
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
3655

    
3656
      if self.req_target_uuids:
3657
        lock_groups = set(self.req_target_uuids)
3658

    
3659
        # Lock all groups used by instance optimistically; this requires going
3660
        # via the node before it's locked, requiring verification later on
3661
        instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
3662
        lock_groups.update(instance_groups)
3663
      else:
3664
        # No target groups, need to lock all of them
3665
        lock_groups = locking.ALL_SET
3666

    
3667
      self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups
3668

    
3669
    elif level == locking.LEVEL_NODE:
3670
      if self.req_target_uuids:
3671
        # Lock all nodes used by instances
3672
        self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
3673
        self._LockInstancesNodes()
3674

    
3675
        # Lock all nodes in all potential target groups
3676
        lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
3677
                       self.cfg.GetInstanceNodeGroups(self.op.instance_uuid))
3678
        member_nodes = [node_uuid
3679
                        for group in lock_groups
3680
                        for node_uuid in self.cfg.GetNodeGroup(group).members]
3681
        self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
3682
      else:
3683
        # Lock all nodes as all groups are potential targets
3684
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
3685

    
3686
  def CheckPrereq(self):
3687
    owned_instance_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
3688
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
3689
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
3690

    
3691
    assert (self.req_target_uuids is None or
3692
            owned_groups.issuperset(self.req_target_uuids))
3693
    assert owned_instance_names == set([self.op.instance_name])
3694

    
3695
    # Get instance information
3696
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
3697

    
3698
    # Check if node groups for locked instance are still correct
3699
    assert owned_nodes.issuperset(self.instance.all_nodes), \
3700
      ("Instance %s's nodes changed while we kept the lock" %
3701
       self.op.instance_name)
3702

    
3703
    inst_groups = CheckInstanceNodeGroups(self.cfg, self.op.instance_uuid,
3704
                                          owned_groups)
3705

    
3706
    if self.req_target_uuids:
3707
      # User requested specific target groups
3708
      self.target_uuids = frozenset(self.req_target_uuids)
3709
    else:
3710
      # All groups except those used by the instance are potential targets
3711
      self.target_uuids = owned_groups - inst_groups
3712

    
3713
    conflicting_groups = self.target_uuids & inst_groups
3714
    if conflicting_groups:
3715
      raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
3716
                                 " used by the instance '%s'" %
3717
                                 (utils.CommaJoin(conflicting_groups),
3718
                                  self.op.instance_name),
3719
                                 errors.ECODE_INVAL)
3720

    
3721
    if not self.target_uuids:
3722
      raise errors.OpPrereqError("There are no possible target groups",
3723
                                 errors.ECODE_INVAL)
3724

    
3725
  def BuildHooksEnv(self):
3726
    """Build hooks env.
3727

3728
    """
3729
    assert self.target_uuids
3730

    
3731
    env = {
3732
      "TARGET_GROUPS": " ".join(self.target_uuids),
3733
      }
3734

    
3735
    env.update(BuildInstanceHookEnvByObject(self, self.instance))
3736

    
3737
    return env
3738

    
3739
  def BuildHooksNodes(self):
3740
    """Build hooks nodes.
3741

3742
    """
3743
    mn = self.cfg.GetMasterNode()
3744
    return ([mn], [mn])
3745

    
3746
  def Exec(self, feedback_fn):
3747
    instances = list(self.owned_locks(locking.LEVEL_INSTANCE))
3748

    
3749
    assert instances == [self.op.instance_name], "Instance not locked"
3750

    
3751
    req = iallocator.IAReqGroupChange(instances=instances,
3752
                                      target_groups=list(self.target_uuids))
3753
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)
3754

    
3755
    ial.Run(self.op.iallocator)
3756

    
3757
    if not ial.success:
3758
      raise errors.OpPrereqError("Can't compute solution for changing group of"
3759
                                 " instance '%s' using iallocator '%s': %s" %
3760
                                 (self.op.instance_name, self.op.iallocator,
3761
                                  ial.info), errors.ECODE_NORES)
3762

    
3763
    jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False)
3764

    
3765
    self.LogInfo("Iallocator returned %s job(s) for changing group of"
3766
                 " instance '%s'", len(jobs), self.op.instance_name)
3767

    
3768
    return ResultWithJobs(jobs)