Statistics
| Branch: | Tag: | Revision:

root / lib / cmdlib / instance.py @ 1a182390

History | View | Annotate | Download (151 kB)

1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Logical units dealing with instances."""
23

    
24
import OpenSSL
25
import copy
26
import logging
27
import os
28

    
29
from ganeti import compat
30
from ganeti import constants
31
from ganeti import errors
32
from ganeti import ht
33
from ganeti import hypervisor
34
from ganeti import locking
35
from ganeti.masterd import iallocator
36
from ganeti import masterd
37
from ganeti import netutils
38
from ganeti import objects
39
from ganeti import pathutils
40
import ganeti.rpc.node as rpc
41
from ganeti import utils
42

    
43
from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs
44

    
45
from ganeti.cmdlib.common import INSTANCE_DOWN, \
46
  INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, CheckNodeOnline, \
47
  ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
48
  LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
49
  IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
50
  AnnotateDiskParams, GetUpdatedParams, ExpandInstanceUuidAndName, \
51
  ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeUuidAndName, \
52
  CheckDiskTemplateEnabled, IsValidDiskAccessModeCombination
53
from ganeti.cmdlib.instance_storage import CreateDisks, \
54
  CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, WaitForSync, \
55
  IsExclusiveStorageEnabledNodeUuid, CreateSingleBlockDev, ComputeDisks, \
56
  CheckRADOSFreeSpace, ComputeDiskSizePerVG, GenerateDiskTemplate, \
57
  StartInstanceDisks, ShutdownInstanceDisks, AssembleInstanceDisks, \
58
  CheckSpindlesExclusiveStorage
59
from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
60
  GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
61
  NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
62
  ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
63
  GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
64
  CheckInstanceBridgesExist, CheckNicsBridgesExist, CheckNodeHasOS
65

    
66
import ganeti.masterd.instance
67

    
68

    
69
#: Type description for changes as returned by L{_ApplyContainerMods}'s
70
#: callbacks
71
_TApplyContModsCbChanges = \
72
  ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
73
    ht.TNonEmptyString,
74
    ht.TAny,
75
    ])))
76

    
77

    
78
def _CheckHostnameSane(lu, name):
79
  """Ensures that a given hostname resolves to a 'sane' name.
80

81
  The given name is required to be a prefix of the resolved hostname,
82
  to prevent accidental mismatches.
83

84
  @param lu: the logical unit on behalf of which we're checking
85
  @param name: the name we should resolve and check
86
  @return: the resolved hostname object
87

88
  """
89
  hostname = netutils.GetHostname(name=name)
90
  if hostname.name != name:
91
    lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name)
92
  if not utils.MatchNameComponent(name, [hostname.name]):
93
    raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
94
                                " same as given hostname '%s'") %
95
                               (hostname.name, name), errors.ECODE_INVAL)
96
  return hostname
97

    
98

    
99
def _CheckOpportunisticLocking(op):
100
  """Generate error if opportunistic locking is not possible.
101

102
  """
103
  if op.opportunistic_locking and not op.iallocator:
104
    raise errors.OpPrereqError("Opportunistic locking is only available in"
105
                               " combination with an instance allocator",
106
                               errors.ECODE_INVAL)
107

    
108

    
109
def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_name_whitelist):
110
  """Wrapper around IAReqInstanceAlloc.
111

112
  @param op: The instance opcode
113
  @param disks: The computed disks
114
  @param nics: The computed nics
115
  @param beparams: The full filled beparams
116
  @param node_name_whitelist: List of nodes which should appear as online to the
117
    allocator (unless the node is already marked offline)
118

119
  @returns: A filled L{iallocator.IAReqInstanceAlloc}
120

121
  """
122
  spindle_use = beparams[constants.BE_SPINDLE_USE]
123
  return iallocator.IAReqInstanceAlloc(name=op.instance_name,
124
                                       disk_template=op.disk_template,
125
                                       tags=op.tags,
126
                                       os=op.os_type,
127
                                       vcpus=beparams[constants.BE_VCPUS],
128
                                       memory=beparams[constants.BE_MAXMEM],
129
                                       spindle_use=spindle_use,
130
                                       disks=disks,
131
                                       nics=[n.ToDict() for n in nics],
132
                                       hypervisor=op.hypervisor,
133
                                       node_whitelist=node_name_whitelist)
134

    
135

    
136
def _ComputeFullBeParams(op, cluster):
137
  """Computes the full beparams.
138

139
  @param op: The instance opcode
140
  @param cluster: The cluster config object
141

142
  @return: The fully filled beparams
143

144
  """
145
  default_beparams = cluster.beparams[constants.PP_DEFAULT]
146
  for param, value in op.beparams.iteritems():
147
    if value == constants.VALUE_AUTO:
148
      op.beparams[param] = default_beparams[param]
149
  objects.UpgradeBeParams(op.beparams)
150
  utils.ForceDictType(op.beparams, constants.BES_PARAMETER_TYPES)
151
  return cluster.SimpleFillBE(op.beparams)
152

    
153

    
154
def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
155
  """Computes the nics.
156

157
  @param op: The instance opcode
158
  @param cluster: Cluster configuration object
159
  @param default_ip: The default ip to assign
160
  @param cfg: An instance of the configuration object
161
  @param ec_id: Execution context ID
162

163
  @returns: The build up nics
164

165
  """
166
  nics = []
167
  for nic in op.nics:
168
    nic_mode_req = nic.get(constants.INIC_MODE, None)
169
    nic_mode = nic_mode_req
170
    if nic_mode is None or nic_mode == constants.VALUE_AUTO:
171
      nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]
172

    
173
    net = nic.get(constants.INIC_NETWORK, None)
174
    link = nic.get(constants.NIC_LINK, None)
175
    ip = nic.get(constants.INIC_IP, None)
176
    vlan = nic.get(constants.INIC_VLAN, None)
177

    
178
    if net is None or net.lower() == constants.VALUE_NONE:
179
      net = None
180
    else:
181
      if nic_mode_req is not None or link is not None:
182
        raise errors.OpPrereqError("If network is given, no mode or link"
183
                                   " is allowed to be passed",
184
                                   errors.ECODE_INVAL)
185

    
186
    # ip validity checks
187
    if ip is None or ip.lower() == constants.VALUE_NONE:
188
      nic_ip = None
189
    elif ip.lower() == constants.VALUE_AUTO:
190
      if not op.name_check:
191
        raise errors.OpPrereqError("IP address set to auto but name checks"
192
                                   " have been skipped",
193
                                   errors.ECODE_INVAL)
194
      nic_ip = default_ip
195
    else:
196
      # We defer pool operations until later, so that the iallocator has
197
      # filled in the instance's node(s) dimara
198
      if ip.lower() == constants.NIC_IP_POOL:
199
        if net is None:
200
          raise errors.OpPrereqError("if ip=pool, parameter network"
201
                                     " must be passed too",
202
                                     errors.ECODE_INVAL)
203

    
204
      elif not netutils.IPAddress.IsValid(ip):
205
        raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
206
                                   errors.ECODE_INVAL)
207

    
208
      nic_ip = ip
209

    
210
    # TODO: check the ip address for uniqueness
211
    if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
212
      raise errors.OpPrereqError("Routed nic mode requires an ip address",
213
                                 errors.ECODE_INVAL)
214

    
215
    # MAC address verification
216
    mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
217
    if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
218
      mac = utils.NormalizeAndValidateMac(mac)
219

    
220
      try:
221
        # TODO: We need to factor this out
222
        cfg.ReserveMAC(mac, ec_id)
223
      except errors.ReservationError:
224
        raise errors.OpPrereqError("MAC address %s already in use"
225
                                   " in cluster" % mac,
226
                                   errors.ECODE_NOTUNIQUE)
227

    
228
    #  Build nic parameters
229
    nicparams = {}
230
    if nic_mode_req:
231
      nicparams[constants.NIC_MODE] = nic_mode
232
    if link:
233
      nicparams[constants.NIC_LINK] = link
234
    if vlan:
235
      nicparams[constants.NIC_VLAN] = vlan
236

    
237
    check_params = cluster.SimpleFillNIC(nicparams)
238
    objects.NIC.CheckParameterSyntax(check_params)
239
    net_uuid = cfg.LookupNetwork(net)
240
    name = nic.get(constants.INIC_NAME, None)
241
    if name is not None and name.lower() == constants.VALUE_NONE:
242
      name = None
243
    nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name,
244
                          network=net_uuid, nicparams=nicparams)
245
    nic_obj.uuid = cfg.GenerateUniqueID(ec_id)
246
    nics.append(nic_obj)
247

    
248
  return nics
249

    
250

    
251
def _CheckForConflictingIp(lu, ip, node_uuid):
252
  """In case of conflicting IP address raise error.
253

254
  @type ip: string
255
  @param ip: IP address
256
  @type node_uuid: string
257
  @param node_uuid: node UUID
258

259
  """
260
  (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node_uuid)
261
  if conf_net is not None:
262
    raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
263
                                " network %s, but the target NIC does not." %
264
                                (ip, conf_net)),
265
                               errors.ECODE_STATE)
266

    
267
  return (None, None)
268

    
269

    
270
def _ComputeIPolicyInstanceSpecViolation(
271
  ipolicy, instance_spec, disk_template,
272
  _compute_fn=ComputeIPolicySpecViolation):
273
  """Compute if instance specs meets the specs of ipolicy.
274

275
  @type ipolicy: dict
276
  @param ipolicy: The ipolicy to verify against
277
  @param instance_spec: dict
278
  @param instance_spec: The instance spec to verify
279
  @type disk_template: string
280
  @param disk_template: the disk template of the instance
281
  @param _compute_fn: The function to verify ipolicy (unittest only)
282
  @see: L{ComputeIPolicySpecViolation}
283

284
  """
285
  mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
286
  cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
287
  disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
288
  disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
289
  nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
290
  spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)
291

    
292
  return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
293
                     disk_sizes, spindle_use, disk_template)
294

    
295

    
296
def _CheckOSVariant(os_obj, name):
297
  """Check whether an OS name conforms to the os variants specification.
298

299
  @type os_obj: L{objects.OS}
300
  @param os_obj: OS object to check
301
  @type name: string
302
  @param name: OS name passed by the user, to check for validity
303

304
  """
305
  variant = objects.OS.GetVariant(name)
306
  if not os_obj.supported_variants:
307
    if variant:
308
      raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
309
                                 " passed)" % (os_obj.name, variant),
310
                                 errors.ECODE_INVAL)
311
    return
312
  if not variant:
313
    raise errors.OpPrereqError("OS name must include a variant",
314
                               errors.ECODE_INVAL)
315

    
316
  if variant not in os_obj.supported_variants:
317
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)
318

    
319

    
320
class LUInstanceCreate(LogicalUnit):
321
  """Create an instance.
322

323
  """
324
  HPATH = "instance-add"
325
  HTYPE = constants.HTYPE_INSTANCE
326
  REQ_BGL = False
327

    
328
  def _CheckDiskTemplateValid(self):
329
    """Checks validity of disk template.
330

331
    """
332
    cluster = self.cfg.GetClusterInfo()
333
    if self.op.disk_template is None:
334
      # FIXME: It would be better to take the default disk template from the
335
      # ipolicy, but for the ipolicy we need the primary node, which we get from
336
      # the iallocator, which wants the disk template as input. To solve this
337
      # chicken-and-egg problem, it should be possible to specify just a node
338
      # group from the iallocator and take the ipolicy from that.
339
      self.op.disk_template = cluster.enabled_disk_templates[0]
340
    CheckDiskTemplateEnabled(cluster, self.op.disk_template)
341

    
342
  def _CheckDiskArguments(self):
343
    """Checks validity of disk-related arguments.
344

345
    """
346
    # check that disk's names are unique and valid
347
    utils.ValidateDeviceNames("disk", self.op.disks)
348

    
349
    self._CheckDiskTemplateValid()
350

    
351
    # check disks. parameter names and consistent adopt/no-adopt strategy
352
    has_adopt = has_no_adopt = False
353
    for disk in self.op.disks:
354
      if self.op.disk_template != constants.DT_EXT:
355
        utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
356
      if constants.IDISK_ADOPT in disk:
357
        has_adopt = True
358
      else:
359
        has_no_adopt = True
360
    if has_adopt and has_no_adopt:
361
      raise errors.OpPrereqError("Either all disks are adopted or none is",
362
                                 errors.ECODE_INVAL)
363
    if has_adopt:
364
      if self.op.disk_template not in constants.DTS_MAY_ADOPT:
365
        raise errors.OpPrereqError("Disk adoption is not supported for the"
366
                                   " '%s' disk template" %
367
                                   self.op.disk_template,
368
                                   errors.ECODE_INVAL)
369
      if self.op.iallocator is not None:
370
        raise errors.OpPrereqError("Disk adoption not allowed with an"
371
                                   " iallocator script", errors.ECODE_INVAL)
372
      if self.op.mode == constants.INSTANCE_IMPORT:
373
        raise errors.OpPrereqError("Disk adoption not allowed for"
374
                                   " instance import", errors.ECODE_INVAL)
375
    else:
376
      if self.op.disk_template in constants.DTS_MUST_ADOPT:
377
        raise errors.OpPrereqError("Disk template %s requires disk adoption,"
378
                                   " but no 'adopt' parameter given" %
379
                                   self.op.disk_template,
380
                                   errors.ECODE_INVAL)
381

    
382
    self.adopt_disks = has_adopt
383

    
384
  def _CheckVLANArguments(self):
385
    """ Check validity of VLANs if given
386

387
    """
388
    for nic in self.op.nics:
389
      vlan = nic.get(constants.INIC_VLAN, None)
390
      if vlan:
391
        if vlan[0] == ".":
392
          # vlan starting with dot means single untagged vlan,
393
          # might be followed by trunk (:)
394
          if not vlan[1:].isdigit():
395
            vlanlist = vlan[1:].split(':')
396
            for vl in vlanlist:
397
              if not vl.isdigit():
398
                raise errors.OpPrereqError("Specified VLAN parameter is "
399
                                           "invalid : %s" % vlan,
400
                                             errors.ECODE_INVAL)
401
        elif vlan[0] == ":":
402
          # Trunk - tagged only
403
          vlanlist = vlan[1:].split(':')
404
          for vl in vlanlist:
405
            if not vl.isdigit():
406
              raise errors.OpPrereqError("Specified VLAN parameter is invalid"
407
                                           " : %s" % vlan, errors.ECODE_INVAL)
408
        elif vlan.isdigit():
409
          # This is the simplest case. No dots, only single digit
410
          # -> Create untagged access port, dot needs to be added
411
          nic[constants.INIC_VLAN] = "." + vlan
412
        else:
413
          raise errors.OpPrereqError("Specified VLAN parameter is invalid"
414
                                       " : %s" % vlan, errors.ECODE_INVAL)
415

    
416
  def CheckArguments(self):
417
    """Check arguments.
418

419
    """
420
    # do not require name_check to ease forward/backward compatibility
421
    # for tools
422
    if self.op.no_install and self.op.start:
423
      self.LogInfo("No-installation mode selected, disabling startup")
424
      self.op.start = False
425
    # validate/normalize the instance name
426
    self.op.instance_name = \
427
      netutils.Hostname.GetNormalizedName(self.op.instance_name)
428

    
429
    if self.op.ip_check and not self.op.name_check:
430
      # TODO: make the ip check more flexible and not depend on the name check
431
      raise errors.OpPrereqError("Cannot do IP address check without a name"
432
                                 " check", errors.ECODE_INVAL)
433

    
434
    # add nic for instance communication
435
    if self.op.instance_communication:
436
      nic_name = "%s%s" % (constants.INSTANCE_COMMUNICATION_NIC_PREFIX,
437
                           self.op.instance_name)
438
      communication_network = constants.INSTANCE_COMMUNICATION_NETWORK
439

    
440
      self.op.nics.append({constants.INIC_NAME: nic_name,
441
                           constants.INIC_MAC: constants.VALUE_GENERATE,
442
                           constants.INIC_IP: constants.NIC_IP_POOL,
443
                           constants.INIC_NETWORK: communication_network})
444

    
445
    # check nics' parameter names
446
    for nic in self.op.nics:
447
      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
448
    # check that NIC's parameters names are unique and valid
449
    utils.ValidateDeviceNames("NIC", self.op.nics)
450

    
451
    self._CheckVLANArguments()
452

    
453
    self._CheckDiskArguments()
454
    assert self.op.disk_template is not None
455

    
456
    # instance name verification
457
    if self.op.name_check:
458
      self.hostname = _CheckHostnameSane(self, self.op.instance_name)
459
      self.op.instance_name = self.hostname.name
460
      # used in CheckPrereq for ip ping check
461
      self.check_ip = self.hostname.ip
462
    else:
463
      self.check_ip = None
464

    
465
    # file storage checks
466
    if (self.op.file_driver and
467
        not self.op.file_driver in constants.FILE_DRIVER):
468
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
469
                                 self.op.file_driver, errors.ECODE_INVAL)
470

    
471
    # set default file_driver if unset and required
472
    if (not self.op.file_driver and
473
        self.op.disk_template in constants.DTS_FILEBASED):
474
      self.op.file_driver = constants.FD_LOOP
475

    
476
    ### Node/iallocator related checks
477
    CheckIAllocatorOrNode(self, "iallocator", "pnode")
478

    
479
    if self.op.pnode is not None:
480
      if self.op.disk_template in constants.DTS_INT_MIRROR:
481
        if self.op.snode is None:
482
          raise errors.OpPrereqError("The networked disk templates need"
483
                                     " a mirror node", errors.ECODE_INVAL)
484
      elif self.op.snode:
485
        self.LogWarning("Secondary node will be ignored on non-mirrored disk"
486
                        " template")
487
        self.op.snode = None
488

    
489
    _CheckOpportunisticLocking(self.op)
490

    
491
    if self.op.mode == constants.INSTANCE_IMPORT:
492
      # On import force_variant must be True, because if we forced it at
493
      # initial install, our only chance when importing it back is that it
494
      # works again!
495
      self.op.force_variant = True
496

    
497
      if self.op.no_install:
498
        self.LogInfo("No-installation mode has no effect during import")
499

    
500
    elif self.op.mode == constants.INSTANCE_CREATE:
501
      if self.op.os_type is None:
502
        raise errors.OpPrereqError("No guest OS specified",
503
                                   errors.ECODE_INVAL)
504
      if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
505
        raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
506
                                   " installation" % self.op.os_type,
507
                                   errors.ECODE_STATE)
508
    elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
509
      self._cds = GetClusterDomainSecret()
510

    
511
      # Check handshake to ensure both clusters have the same domain secret
512
      src_handshake = self.op.source_handshake
513
      if not src_handshake:
514
        raise errors.OpPrereqError("Missing source handshake",
515
                                   errors.ECODE_INVAL)
516

    
517
      errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
518
                                                           src_handshake)
519
      if errmsg:
520
        raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
521
                                   errors.ECODE_INVAL)
522

    
523
      # Load and check source CA
524
      self.source_x509_ca_pem = self.op.source_x509_ca
525
      if not self.source_x509_ca_pem:
526
        raise errors.OpPrereqError("Missing source X509 CA",
527
                                   errors.ECODE_INVAL)
528

    
529
      try:
530
        (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
531
                                                    self._cds)
532
      except OpenSSL.crypto.Error, err:
533
        raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
534
                                   (err, ), errors.ECODE_INVAL)
535

    
536
      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
537
      if errcode is not None:
538
        raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
539
                                   errors.ECODE_INVAL)
540

    
541
      self.source_x509_ca = cert
542

    
543
      src_instance_name = self.op.source_instance_name
544
      if not src_instance_name:
545
        raise errors.OpPrereqError("Missing source instance name",
546
                                   errors.ECODE_INVAL)
547

    
548
      self.source_instance_name = \
549
        netutils.GetHostname(name=src_instance_name).name
550

    
551
    else:
552
      raise errors.OpPrereqError("Invalid instance creation mode %r" %
553
                                 self.op.mode, errors.ECODE_INVAL)
554

    
555
  def ExpandNames(self):
556
    """ExpandNames for CreateInstance.
557

558
    Figure out the right locks for instance creation.
559

560
    """
561
    self.needed_locks = {}
562

    
563
    # this is just a preventive check, but someone might still add this
564
    # instance in the meantime, and creation will fail at lock-add time
565
    if self.op.instance_name in\
566
      [inst.name for inst in self.cfg.GetAllInstancesInfo().values()]:
567
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
568
                                 self.op.instance_name, errors.ECODE_EXISTS)
569

    
570
    self.add_locks[locking.LEVEL_INSTANCE] = self.op.instance_name
571

    
572
    if self.op.iallocator:
573
      # TODO: Find a solution to not lock all nodes in the cluster, e.g. by
574
      # specifying a group on instance creation and then selecting nodes from
575
      # that group
576
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
577
      self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
578

    
579
      if self.op.opportunistic_locking:
580
        self.opportunistic_locks[locking.LEVEL_NODE] = True
581
    else:
582
      (self.op.pnode_uuid, self.op.pnode) = \
583
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
584
      nodelist = [self.op.pnode_uuid]
585
      if self.op.snode is not None:
586
        (self.op.snode_uuid, self.op.snode) = \
587
          ExpandNodeUuidAndName(self.cfg, self.op.snode_uuid, self.op.snode)
588
        nodelist.append(self.op.snode_uuid)
589
      self.needed_locks[locking.LEVEL_NODE] = nodelist
590

    
591
    # in case of import lock the source node too
592
    if self.op.mode == constants.INSTANCE_IMPORT:
593
      src_node = self.op.src_node
594
      src_path = self.op.src_path
595

    
596
      if src_path is None:
597
        self.op.src_path = src_path = self.op.instance_name
598

    
599
      if src_node is None:
600
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
601
        self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
602
        self.op.src_node = None
603
        if os.path.isabs(src_path):
604
          raise errors.OpPrereqError("Importing an instance from a path"
605
                                     " requires a source node option",
606
                                     errors.ECODE_INVAL)
607
      else:
608
        (self.op.src_node_uuid, self.op.src_node) = (_, src_node) = \
609
          ExpandNodeUuidAndName(self.cfg, self.op.src_node_uuid, src_node)
610
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
611
          self.needed_locks[locking.LEVEL_NODE].append(self.op.src_node_uuid)
612
        if not os.path.isabs(src_path):
613
          self.op.src_path = \
614
            utils.PathJoin(pathutils.EXPORT_DIR, src_path)
615

    
616
    self.needed_locks[locking.LEVEL_NODE_RES] = \
617
      CopyLockList(self.needed_locks[locking.LEVEL_NODE])
618

    
619
    # Optimistically acquire shared group locks (we're reading the
620
    # configuration).  We can't just call GetInstanceNodeGroups, because the
621
    # instance doesn't exist yet. Therefore we lock all node groups of all
622
    # nodes we have.
623
    if self.needed_locks[locking.LEVEL_NODE] == locking.ALL_SET:
624
      # In the case we lock all nodes for opportunistic allocation, we have no
625
      # choice than to lock all groups, because they're allocated before nodes.
626
      # This is sad, but true. At least we release all those we don't need in
627
      # CheckPrereq later.
628
      self.needed_locks[locking.LEVEL_NODEGROUP] = locking.ALL_SET
629
    else:
630
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
631
        list(self.cfg.GetNodeGroupsFromNodes(
632
          self.needed_locks[locking.LEVEL_NODE]))
633
    self.share_locks[locking.LEVEL_NODEGROUP] = 1
634

    
635
  def DeclareLocks(self, level):
636
    if level == locking.LEVEL_NODE_RES and \
637
      self.opportunistic_locks[locking.LEVEL_NODE]:
638
      # Even when using opportunistic locking, we require the same set of
639
      # NODE_RES locks as we got NODE locks
640
      self.needed_locks[locking.LEVEL_NODE_RES] = \
641
        self.owned_locks(locking.LEVEL_NODE)
642

    
643
  def _RunAllocator(self):
644
    """Run the allocator based on input opcode.
645

646
    """
647
    if self.op.opportunistic_locking:
648
      # Only consider nodes for which a lock is held
649
      node_name_whitelist = self.cfg.GetNodeNames(
650
        self.owned_locks(locking.LEVEL_NODE))
651
    else:
652
      node_name_whitelist = None
653

    
654
    req = _CreateInstanceAllocRequest(self.op, self.disks,
655
                                      self.nics, self.be_full,
656
                                      node_name_whitelist)
657
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)
658

    
659
    ial.Run(self.op.iallocator)
660

    
661
    if not ial.success:
662
      # When opportunistic locks are used only a temporary failure is generated
663
      if self.op.opportunistic_locking:
664
        ecode = errors.ECODE_TEMP_NORES
665
      else:
666
        ecode = errors.ECODE_NORES
667

    
668
      raise errors.OpPrereqError("Can't compute nodes using"
669
                                 " iallocator '%s': %s" %
670
                                 (self.op.iallocator, ial.info),
671
                                 ecode)
672

    
673
    (self.op.pnode_uuid, self.op.pnode) = \
674
      ExpandNodeUuidAndName(self.cfg, None, ial.result[0])
675
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
676
                 self.op.instance_name, self.op.iallocator,
677
                 utils.CommaJoin(ial.result))
678

    
679
    assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator"
680

    
681
    if req.RequiredNodes() == 2:
682
      (self.op.snode_uuid, self.op.snode) = \
683
        ExpandNodeUuidAndName(self.cfg, None, ial.result[1])
684

    
685
  def BuildHooksEnv(self):
686
    """Build hooks env.
687

688
    This runs on master, primary and secondary nodes of the instance.
689

690
    """
691
    env = {
692
      "ADD_MODE": self.op.mode,
693
      }
694
    if self.op.mode == constants.INSTANCE_IMPORT:
695
      env["SRC_NODE"] = self.op.src_node
696
      env["SRC_PATH"] = self.op.src_path
697
      env["SRC_IMAGES"] = self.src_images
698

    
699
    env.update(BuildInstanceHookEnv(
700
      name=self.op.instance_name,
701
      primary_node_name=self.op.pnode,
702
      secondary_node_names=self.cfg.GetNodeNames(self.secondaries),
703
      status=self.op.start,
704
      os_type=self.op.os_type,
705
      minmem=self.be_full[constants.BE_MINMEM],
706
      maxmem=self.be_full[constants.BE_MAXMEM],
707
      vcpus=self.be_full[constants.BE_VCPUS],
708
      nics=NICListToTuple(self, self.nics),
709
      disk_template=self.op.disk_template,
710
      disks=[(d[constants.IDISK_NAME], d.get("uuid", ""),
711
              d[constants.IDISK_SIZE], d[constants.IDISK_MODE])
712
             for d in self.disks],
713
      bep=self.be_full,
714
      hvp=self.hv_full,
715
      hypervisor_name=self.op.hypervisor,
716
      tags=self.op.tags,
717
      ))
718

    
719
    return env
720

    
721
  def BuildHooksNodes(self):
722
    """Build hooks nodes.
723

724
    """
725
    nl = [self.cfg.GetMasterNode(), self.op.pnode_uuid] + self.secondaries
726
    return nl, nl
727

    
728
  def _ReadExportInfo(self):
729
    """Reads the export information from disk.
730

731
    It will override the opcode source node and path with the actual
732
    information, if these two were not specified before.
733

734
    @return: the export information
735

736
    """
737
    assert self.op.mode == constants.INSTANCE_IMPORT
738

    
739
    if self.op.src_node_uuid is None:
740
      locked_nodes = self.owned_locks(locking.LEVEL_NODE)
741
      exp_list = self.rpc.call_export_list(locked_nodes)
742
      found = False
743
      for node_uuid in exp_list:
744
        if exp_list[node_uuid].fail_msg:
745
          continue
746
        if self.op.src_path in exp_list[node_uuid].payload:
747
          found = True
748
          self.op.src_node = self.cfg.GetNodeInfo(node_uuid).name
749
          self.op.src_node_uuid = node_uuid
750
          self.op.src_path = utils.PathJoin(pathutils.EXPORT_DIR,
751
                                            self.op.src_path)
752
          break
753
      if not found:
754
        raise errors.OpPrereqError("No export found for relative path %s" %
755
                                   self.op.src_path, errors.ECODE_INVAL)
756

    
757
    CheckNodeOnline(self, self.op.src_node_uuid)
758
    result = self.rpc.call_export_info(self.op.src_node_uuid, self.op.src_path)
759
    result.Raise("No export or invalid export found in dir %s" %
760
                 self.op.src_path)
761

    
762
    export_info = objects.SerializableConfigParser.Loads(str(result.payload))
763
    if not export_info.has_section(constants.INISECT_EXP):
764
      raise errors.ProgrammerError("Corrupted export config",
765
                                   errors.ECODE_ENVIRON)
766

    
767
    ei_version = export_info.get(constants.INISECT_EXP, "version")
768
    if int(ei_version) != constants.EXPORT_VERSION:
769
      raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
770
                                 (ei_version, constants.EXPORT_VERSION),
771
                                 errors.ECODE_ENVIRON)
772
    return export_info
773

    
774
  def _ReadExportParams(self, einfo):
775
    """Use export parameters as defaults.
776

777
    In case the opcode doesn't specify (as in override) some instance
778
    parameters, then try to use them from the export information, if
779
    that declares them.
780

781
    """
782
    self.op.os_type = einfo.get(constants.INISECT_EXP, "os")
783

    
784
    if not self.op.disks:
785
      disks = []
786
      # TODO: import the disk iv_name too
787
      for idx in range(constants.MAX_DISKS):
788
        if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
789
          disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
790
          disks.append({constants.IDISK_SIZE: disk_sz})
791
      self.op.disks = disks
792
      if not disks and self.op.disk_template != constants.DT_DISKLESS:
793
        raise errors.OpPrereqError("No disk info specified and the export"
794
                                   " is missing the disk information",
795
                                   errors.ECODE_INVAL)
796

    
797
    if not self.op.nics:
798
      nics = []
799
      for idx in range(constants.MAX_NICS):
800
        if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
801
          ndict = {}
802
          for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
803
            nic_param_name = "nic%d_%s" % (idx, name)
804
            if einfo.has_option(constants.INISECT_INS, nic_param_name):
805
              v = einfo.get(constants.INISECT_INS, nic_param_name)
806
              ndict[name] = v
807
          nics.append(ndict)
808
        else:
809
          break
810
      self.op.nics = nics
811

    
812
    if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
813
      self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()
814

    
815
    if (self.op.hypervisor is None and
816
        einfo.has_option(constants.INISECT_INS, "hypervisor")):
817
      self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")
818

    
819
    if einfo.has_section(constants.INISECT_HYP):
820
      # use the export parameters but do not override the ones
821
      # specified by the user
822
      for name, value in einfo.items(constants.INISECT_HYP):
823
        if name not in self.op.hvparams:
824
          self.op.hvparams[name] = value
825

    
826
    if einfo.has_section(constants.INISECT_BEP):
827
      # use the parameters, without overriding
828
      for name, value in einfo.items(constants.INISECT_BEP):
829
        if name not in self.op.beparams:
830
          self.op.beparams[name] = value
831
        # Compatibility for the old "memory" be param
832
        if name == constants.BE_MEMORY:
833
          if constants.BE_MAXMEM not in self.op.beparams:
834
            self.op.beparams[constants.BE_MAXMEM] = value
835
          if constants.BE_MINMEM not in self.op.beparams:
836
            self.op.beparams[constants.BE_MINMEM] = value
837
    else:
838
      # try to read the parameters old style, from the main section
839
      for name in constants.BES_PARAMETERS:
840
        if (name not in self.op.beparams and
841
            einfo.has_option(constants.INISECT_INS, name)):
842
          self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)
843

    
844
    if einfo.has_section(constants.INISECT_OSP):
845
      # use the parameters, without overriding
846
      for name, value in einfo.items(constants.INISECT_OSP):
847
        if name not in self.op.osparams:
848
          self.op.osparams[name] = value
849

    
850
  def _RevertToDefaults(self, cluster):
851
    """Revert the instance parameters to the default values.
852

853
    """
854
    # hvparams
855
    hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
856
    for name in self.op.hvparams.keys():
857
      if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
858
        del self.op.hvparams[name]
859
    # beparams
860
    be_defs = cluster.SimpleFillBE({})
861
    for name in self.op.beparams.keys():
862
      if name in be_defs and be_defs[name] == self.op.beparams[name]:
863
        del self.op.beparams[name]
864
    # nic params
865
    nic_defs = cluster.SimpleFillNIC({})
866
    for nic in self.op.nics:
867
      for name in constants.NICS_PARAMETERS:
868
        if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
869
          del nic[name]
870
    # osparams
871
    os_defs = cluster.SimpleFillOS(self.op.os_type, {})
872
    for name in self.op.osparams.keys():
873
      if name in os_defs and os_defs[name] == self.op.osparams[name]:
874
        del self.op.osparams[name]
875

    
876
  def _CalculateFileStorageDir(self):
877
    """Calculate final instance file storage dir.
878

879
    """
880
    # file storage dir calculation/check
881
    self.instance_file_storage_dir = None
882
    if self.op.disk_template in constants.DTS_FILEBASED:
883
      # build the full file storage dir path
884
      joinargs = []
885

    
886
      cfg_storage = None
887
      if self.op.disk_template == constants.DT_FILE:
888
        cfg_storage = self.cfg.GetFileStorageDir()
889
      elif self.op.disk_template == constants.DT_SHARED_FILE:
890
        cfg_storage = self.cfg.GetSharedFileStorageDir()
891
      elif self.op.disk_template == constants.DT_GLUSTER:
892
        cfg_storage = self.cfg.GetGlusterStorageDir()
893

    
894
      if not cfg_storage:
895
        raise errors.OpPrereqError(
896
          "Cluster file storage dir for {tpl} storage type not defined".format(
897
            tpl=repr(self.op.disk_template)
898
          ),
899
          errors.ECODE_STATE
900
      )
901

    
902
      joinargs.append(cfg_storage)
903

    
904
      if self.op.file_storage_dir is not None:
905
        joinargs.append(self.op.file_storage_dir)
906

    
907
      if self.op.disk_template != constants.DT_GLUSTER:
908
        joinargs.append(self.op.instance_name)
909

    
910
      if len(joinargs) > 1:
911
        # pylint: disable=W0142
912
        self.instance_file_storage_dir = utils.PathJoin(*joinargs)
913
      else:
914
        self.instance_file_storage_dir = joinargs[0]
915

    
916
  def CheckPrereq(self): # pylint: disable=R0914
917
    """Check prerequisites.
918

919
    """
920
    # Check that the optimistically acquired groups are correct wrt the
921
    # acquired nodes
922
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
923
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
924
    cur_groups = list(self.cfg.GetNodeGroupsFromNodes(owned_nodes))
925
    if not owned_groups.issuperset(cur_groups):
926
      raise errors.OpPrereqError("New instance %s's node groups changed since"
927
                                 " locks were acquired, current groups are"
928
                                 " are '%s', owning groups '%s'; retry the"
929
                                 " operation" %
930
                                 (self.op.instance_name,
931
                                  utils.CommaJoin(cur_groups),
932
                                  utils.CommaJoin(owned_groups)),
933
                                 errors.ECODE_STATE)
934

    
935
    self._CalculateFileStorageDir()
936

    
937
    if self.op.mode == constants.INSTANCE_IMPORT:
938
      export_info = self._ReadExportInfo()
939
      self._ReadExportParams(export_info)
940
      self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
941
    else:
942
      self._old_instance_name = None
943

    
944
    if (not self.cfg.GetVGName() and
945
        self.op.disk_template not in constants.DTS_NOT_LVM):
946
      raise errors.OpPrereqError("Cluster does not support lvm-based"
947
                                 " instances", errors.ECODE_STATE)
948

    
949
    if (self.op.hypervisor is None or
950
        self.op.hypervisor == constants.VALUE_AUTO):
951
      self.op.hypervisor = self.cfg.GetHypervisorType()
952

    
953
    cluster = self.cfg.GetClusterInfo()
954
    enabled_hvs = cluster.enabled_hypervisors
955
    if self.op.hypervisor not in enabled_hvs:
956
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
957
                                 " cluster (%s)" %
958
                                 (self.op.hypervisor, ",".join(enabled_hvs)),
959
                                 errors.ECODE_STATE)
960

    
961
    # Check tag validity
962
    for tag in self.op.tags:
963
      objects.TaggableObject.ValidateTag(tag)
964

    
965
    # check hypervisor parameter syntax (locally)
966
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
967
    filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
968
                                      self.op.hvparams)
969
    hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
970
    hv_type.CheckParameterSyntax(filled_hvp)
971
    self.hv_full = filled_hvp
972
    # check that we don't specify global parameters on an instance
973
    CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
974
                         "instance", "cluster")
975

    
976
    # fill and remember the beparams dict
977
    self.be_full = _ComputeFullBeParams(self.op, cluster)
978

    
979
    # build os parameters
980
    self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)
981

    
982
    # now that hvp/bep are in final format, let's reset to defaults,
983
    # if told to do so
984
    if self.op.identify_defaults:
985
      self._RevertToDefaults(cluster)
986

    
987
    # NIC buildup
988
    self.nics = _ComputeNics(self.op, cluster, self.check_ip, self.cfg,
989
                             self.proc.GetECId())
990

    
991
    # disk checks/pre-build
992
    default_vg = self.cfg.GetVGName()
993
    self.disks = ComputeDisks(self.op, default_vg)
994

    
995
    if self.op.mode == constants.INSTANCE_IMPORT:
996
      disk_images = []
997
      for idx in range(len(self.disks)):
998
        option = "disk%d_dump" % idx
999
        if export_info.has_option(constants.INISECT_INS, option):
1000
          # FIXME: are the old os-es, disk sizes, etc. useful?
1001
          export_name = export_info.get(constants.INISECT_INS, option)
1002
          image = utils.PathJoin(self.op.src_path, export_name)
1003
          disk_images.append(image)
1004
        else:
1005
          disk_images.append(False)
1006

    
1007
      self.src_images = disk_images
1008

    
1009
      if self.op.instance_name == self._old_instance_name:
1010
        for idx, nic in enumerate(self.nics):
1011
          if nic.mac == constants.VALUE_AUTO:
1012
            nic_mac_ini = "nic%d_mac" % idx
1013
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)
1014

    
1015
    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT
1016

    
1017
    # ip ping checks (we use the same ip that was resolved in ExpandNames)
1018
    if self.op.ip_check:
1019
      if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
1020
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
1021
                                   (self.check_ip, self.op.instance_name),
1022
                                   errors.ECODE_NOTUNIQUE)
1023

    
1024
    #### mac address generation
1025
    # By generating here the mac address both the allocator and the hooks get
1026
    # the real final mac address rather than the 'auto' or 'generate' value.
1027
    # There is a race condition between the generation and the instance object
1028
    # creation, which means that we know the mac is valid now, but we're not
1029
    # sure it will be when we actually add the instance. If things go bad
1030
    # adding the instance will abort because of a duplicate mac, and the
1031
    # creation job will fail.
1032
    for nic in self.nics:
1033
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
1034
        nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId())
1035

    
1036
    #### allocator run
1037

    
1038
    if self.op.iallocator is not None:
1039
      self._RunAllocator()
1040

    
1041
    # Release all unneeded node locks
1042
    keep_locks = filter(None, [self.op.pnode_uuid, self.op.snode_uuid,
1043
                               self.op.src_node_uuid])
1044
    ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
1045
    ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
1046
    ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)
1047
    # Release all unneeded group locks
1048
    ReleaseLocks(self, locking.LEVEL_NODEGROUP,
1049
                 keep=self.cfg.GetNodeGroupsFromNodes(keep_locks))
1050

    
1051
    assert (self.owned_locks(locking.LEVEL_NODE) ==
1052
            self.owned_locks(locking.LEVEL_NODE_RES)), \
1053
      "Node locks differ from node resource locks"
1054

    
1055
    #### node related checks
1056

    
1057
    # check primary node
1058
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode_uuid)
1059
    assert self.pnode is not None, \
1060
      "Cannot retrieve locked node %s" % self.op.pnode_uuid
1061
    if pnode.offline:
1062
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
1063
                                 pnode.name, errors.ECODE_STATE)
1064
    if pnode.drained:
1065
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
1066
                                 pnode.name, errors.ECODE_STATE)
1067
    if not pnode.vm_capable:
1068
      raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
1069
                                 " '%s'" % pnode.name, errors.ECODE_STATE)
1070

    
1071
    self.secondaries = []
1072

    
1073
    # Fill in any IPs from IP pools. This must happen here, because we need to
1074
    # know the nic's primary node, as specified by the iallocator
1075
    for idx, nic in enumerate(self.nics):
1076
      net_uuid = nic.network
1077
      if net_uuid is not None:
1078
        nobj = self.cfg.GetNetwork(net_uuid)
1079
        netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.uuid)
1080
        if netparams is None:
1081
          raise errors.OpPrereqError("No netparams found for network"
1082
                                     " %s. Probably not connected to"
1083
                                     " node's %s nodegroup" %
1084
                                     (nobj.name, self.pnode.name),
1085
                                     errors.ECODE_INVAL)
1086
        self.LogInfo("NIC/%d inherits netparams %s" %
1087
                     (idx, netparams.values()))
1088
        nic.nicparams = dict(netparams)
1089
        if nic.ip is not None:
1090
          if nic.ip.lower() == constants.NIC_IP_POOL:
1091
            try:
1092
              nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId())
1093
            except errors.ReservationError:
1094
              raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
1095
                                         " from the address pool" % idx,
1096
                                         errors.ECODE_STATE)
1097
            self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name)
1098
          else:
1099
            try:
1100
              self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId(),
1101
                                 check=self.op.conflicts_check)
1102
            except errors.ReservationError:
1103
              raise errors.OpPrereqError("IP address %s already in use"
1104
                                         " or does not belong to network %s" %
1105
                                         (nic.ip, nobj.name),
1106
                                         errors.ECODE_NOTUNIQUE)
1107

    
1108
      # net is None, ip None or given
1109
      elif self.op.conflicts_check:
1110
        _CheckForConflictingIp(self, nic.ip, self.pnode.uuid)
1111

    
1112
    # mirror node verification
1113
    if self.op.disk_template in constants.DTS_INT_MIRROR:
1114
      if self.op.snode_uuid == pnode.uuid:
1115
        raise errors.OpPrereqError("The secondary node cannot be the"
1116
                                   " primary node", errors.ECODE_INVAL)
1117
      CheckNodeOnline(self, self.op.snode_uuid)
1118
      CheckNodeNotDrained(self, self.op.snode_uuid)
1119
      CheckNodeVmCapable(self, self.op.snode_uuid)
1120
      self.secondaries.append(self.op.snode_uuid)
1121

    
1122
      snode = self.cfg.GetNodeInfo(self.op.snode_uuid)
1123
      if pnode.group != snode.group:
1124
        self.LogWarning("The primary and secondary nodes are in two"
1125
                        " different node groups; the disk parameters"
1126
                        " from the first disk's node group will be"
1127
                        " used")
1128

    
1129
    nodes = [pnode]
1130
    if self.op.disk_template in constants.DTS_INT_MIRROR:
1131
      nodes.append(snode)
1132
    has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
1133
    excl_stor = compat.any(map(has_es, nodes))
1134
    if excl_stor and not self.op.disk_template in constants.DTS_EXCL_STORAGE:
1135
      raise errors.OpPrereqError("Disk template %s not supported with"
1136
                                 " exclusive storage" % self.op.disk_template,
1137
                                 errors.ECODE_STATE)
1138
    for disk in self.disks:
1139
      CheckSpindlesExclusiveStorage(disk, excl_stor, True)
1140

    
1141
    node_uuids = [pnode.uuid] + self.secondaries
1142

    
1143
    if not self.adopt_disks:
1144
      if self.op.disk_template == constants.DT_RBD:
1145
        # _CheckRADOSFreeSpace() is just a placeholder.
1146
        # Any function that checks prerequisites can be placed here.
1147
        # Check if there is enough space on the RADOS cluster.
1148
        CheckRADOSFreeSpace()
1149
      elif self.op.disk_template == constants.DT_EXT:
1150
        # FIXME: Function that checks prereqs if needed
1151
        pass
1152
      elif self.op.disk_template in constants.DTS_LVM:
1153
        # Check lv size requirements, if not adopting
1154
        req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
1155
        CheckNodesFreeDiskPerVG(self, node_uuids, req_sizes)
1156
      else:
1157
        # FIXME: add checks for other, non-adopting, non-lvm disk templates
1158
        pass
1159

    
1160
    elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
1161
      all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
1162
                                disk[constants.IDISK_ADOPT])
1163
                     for disk in self.disks])
1164
      if len(all_lvs) != len(self.disks):
1165
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
1166
                                   errors.ECODE_INVAL)
1167
      for lv_name in all_lvs:
1168
        try:
1169
          # FIXME: lv_name here is "vg/lv" need to ensure that other calls
1170
          # to ReserveLV uses the same syntax
1171
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
1172
        except errors.ReservationError:
1173
          raise errors.OpPrereqError("LV named %s used by another instance" %
1174
                                     lv_name, errors.ECODE_NOTUNIQUE)
1175

    
1176
      vg_names = self.rpc.call_vg_list([pnode.uuid])[pnode.uuid]
1177
      vg_names.Raise("Cannot get VG information from node %s" % pnode.name)
1178

    
1179
      node_lvs = self.rpc.call_lv_list([pnode.uuid],
1180
                                       vg_names.payload.keys())[pnode.uuid]
1181
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
1182
      node_lvs = node_lvs.payload
1183

    
1184
      delta = all_lvs.difference(node_lvs.keys())
1185
      if delta:
1186
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
1187
                                   utils.CommaJoin(delta),
1188
                                   errors.ECODE_INVAL)
1189
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
1190
      if online_lvs:
1191
        raise errors.OpPrereqError("Online logical volumes found, cannot"
1192
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
1193
                                   errors.ECODE_STATE)
1194
      # update the size of disk based on what is found
1195
      for dsk in self.disks:
1196
        dsk[constants.IDISK_SIZE] = \
1197
          int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
1198
                                        dsk[constants.IDISK_ADOPT])][0]))
1199

    
1200
    elif self.op.disk_template == constants.DT_BLOCK:
1201
      # Normalize and de-duplicate device paths
1202
      all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
1203
                       for disk in self.disks])
1204
      if len(all_disks) != len(self.disks):
1205
        raise errors.OpPrereqError("Duplicate disk names given for adoption",
1206
                                   errors.ECODE_INVAL)
1207
      baddisks = [d for d in all_disks
1208
                  if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
1209
      if baddisks:
1210
        raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
1211
                                   " cannot be adopted" %
1212
                                   (utils.CommaJoin(baddisks),
1213
                                    constants.ADOPTABLE_BLOCKDEV_ROOT),
1214
                                   errors.ECODE_INVAL)
1215

    
1216
      node_disks = self.rpc.call_bdev_sizes([pnode.uuid],
1217
                                            list(all_disks))[pnode.uuid]
1218
      node_disks.Raise("Cannot get block device information from node %s" %
1219
                       pnode.name)
1220
      node_disks = node_disks.payload
1221
      delta = all_disks.difference(node_disks.keys())
1222
      if delta:
1223
        raise errors.OpPrereqError("Missing block device(s): %s" %
1224
                                   utils.CommaJoin(delta),
1225
                                   errors.ECODE_INVAL)
1226
      for dsk in self.disks:
1227
        dsk[constants.IDISK_SIZE] = \
1228
          int(float(node_disks[dsk[constants.IDISK_ADOPT]]))
1229

    
1230
    # Check disk access param to be compatible with specified hypervisor
1231
    node_info = self.cfg.GetNodeInfo(self.op.pnode_uuid)
1232
    node_group = self.cfg.GetNodeGroup(node_info.group)
1233
    disk_params = self.cfg.GetGroupDiskParams(node_group)
1234
    access_type = disk_params[self.op.disk_template].get(
1235
      constants.RBD_ACCESS, constants.DISK_KERNELSPACE
1236
    )
1237

    
1238
    if not IsValidDiskAccessModeCombination(self.op.hypervisor,
1239
                                            self.op.disk_template,
1240
                                            access_type):
1241
      raise errors.OpPrereqError("Selected hypervisor (%s) cannot be"
1242
                                 " used with %s disk access param" %
1243
                                 (self.op.hypervisor, access_type),
1244
                                  errors.ECODE_STATE)
1245

    
1246
    # Verify instance specs
1247
    spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
1248
    ispec = {
1249
      constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
1250
      constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
1251
      constants.ISPEC_DISK_COUNT: len(self.disks),
1252
      constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE]
1253
                                  for disk in self.disks],
1254
      constants.ISPEC_NIC_COUNT: len(self.nics),
1255
      constants.ISPEC_SPINDLE_USE: spindle_use,
1256
      }
1257

    
1258
    group_info = self.cfg.GetNodeGroup(pnode.group)
1259
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
1260
    res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
1261
                                               self.op.disk_template)
1262
    if not self.op.ignore_ipolicy and res:
1263
      msg = ("Instance allocation to group %s (%s) violates policy: %s" %
1264
             (pnode.group, group_info.name, utils.CommaJoin(res)))
1265
      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
1266

    
1267
    CheckHVParams(self, node_uuids, self.op.hypervisor, self.op.hvparams)
1268

    
1269
    CheckNodeHasOS(self, pnode.uuid, self.op.os_type, self.op.force_variant)
1270
    # check OS parameters (remotely)
1271
    CheckOSParams(self, True, node_uuids, self.op.os_type, self.os_full)
1272

    
1273
    CheckNicsBridgesExist(self, self.nics, self.pnode.uuid)
1274

    
1275
    #TODO: _CheckExtParams (remotely)
1276
    # Check parameters for extstorage
1277

    
1278
    # memory check on primary node
1279
    #TODO(dynmem): use MINMEM for checking
1280
    if self.op.start:
1281
      hvfull = objects.FillDict(cluster.hvparams.get(self.op.hypervisor, {}),
1282
                                self.op.hvparams)
1283
      CheckNodeFreeMemory(self, self.pnode.uuid,
1284
                          "creating instance %s" % self.op.instance_name,
1285
                          self.be_full[constants.BE_MAXMEM],
1286
                          self.op.hypervisor, hvfull)
1287

    
1288
    self.dry_run_result = list(node_uuids)
1289

    
1290
  def Exec(self, feedback_fn):
1291
    """Create and add the instance to the cluster.
1292

1293
    """
1294
    assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
1295
                self.owned_locks(locking.LEVEL_NODE)), \
1296
      "Node locks differ from node resource locks"
1297
    assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)
1298

    
1299
    ht_kind = self.op.hypervisor
1300
    if ht_kind in constants.HTS_REQ_PORT:
1301
      network_port = self.cfg.AllocatePort()
1302
    else:
1303
      network_port = None
1304

    
1305
    instance_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
1306

    
1307
    # This is ugly but we got a chicken-egg problem here
1308
    # We can only take the group disk parameters, as the instance
1309
    # has no disks yet (we are generating them right here).
1310
    nodegroup = self.cfg.GetNodeGroup(self.pnode.group)
1311
    disks = GenerateDiskTemplate(self,
1312
                                 self.op.disk_template,
1313
                                 instance_uuid, self.pnode.uuid,
1314
                                 self.secondaries,
1315
                                 self.disks,
1316
                                 self.instance_file_storage_dir,
1317
                                 self.op.file_driver,
1318
                                 0,
1319
                                 feedback_fn,
1320
                                 self.cfg.GetGroupDiskParams(nodegroup))
1321

    
1322
    iobj = objects.Instance(name=self.op.instance_name,
1323
                            uuid=instance_uuid,
1324
                            os=self.op.os_type,
1325
                            primary_node=self.pnode.uuid,
1326
                            nics=self.nics, disks=disks,
1327
                            disk_template=self.op.disk_template,
1328
                            disks_active=False,
1329
                            admin_state=constants.ADMINST_DOWN,
1330
                            network_port=network_port,
1331
                            beparams=self.op.beparams,
1332
                            hvparams=self.op.hvparams,
1333
                            hypervisor=self.op.hypervisor,
1334
                            osparams=self.op.osparams,
1335
                            )
1336

    
1337
    if self.op.tags:
1338
      for tag in self.op.tags:
1339
        iobj.AddTag(tag)
1340

    
1341
    if self.adopt_disks:
      if self.op.disk_template == constants.DT_PLAIN:
        # rename LVs to the newly-generated names; we need to construct
        # 'fake' LV disks with the old data, plus the new unique_id
        tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
        rename_to = []
        for t_dsk, a_dsk in zip(tmp_disks, self.disks):
          rename_to.append(t_dsk.logical_id)
          t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
        result = self.rpc.call_blockdev_rename(self.pnode.uuid,
                                               zip(tmp_disks, rename_to))
        result.Raise("Failed to rename adopted LVs")
    else:
      feedback_fn("* creating instance disks...")
      try:
        CreateDisks(self, iobj)
      except errors.OpExecError:
        self.LogWarning("Device creation failed")
        self.cfg.ReleaseDRBDMinors(self.op.instance_name)
        raise

    feedback_fn("adding instance %s to cluster config" % self.op.instance_name)
1363

    
1364
    self.cfg.AddInstance(iobj, self.proc.GetECId())
1365

    
1366
    # Declare that we don't want to remove the instance lock anymore, as we've
1367
    # added the instance to the config
1368
    del self.remove_locks[locking.LEVEL_INSTANCE]
1369

    
1370
    if self.op.mode == constants.INSTANCE_IMPORT:
1371
      # Release unused nodes
1372
      ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node_uuid])
1373
    else:
1374
      # Release all nodes
1375
      ReleaseLocks(self, locking.LEVEL_NODE)
1376

    
1377
    disk_abort = False
1378
    if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
1379
      feedback_fn("* wiping instance disks...")
1380
      try:
1381
        WipeDisks(self, iobj)
1382
      except errors.OpExecError, err:
1383
        logging.exception("Wiping disks failed")
1384
        self.LogWarning("Wiping instance disks failed (%s)", err)
1385
        disk_abort = True
1386

    
1387
    if disk_abort:
1388
      # Something is already wrong with the disks, don't do anything else
1389
      pass
1390
    elif self.op.wait_for_sync:
1391
      disk_abort = not WaitForSync(self, iobj)
1392
    elif iobj.disk_template in constants.DTS_INT_MIRROR:
1393
      # make sure the disks are not degraded (still sync-ing is ok)
1394
      feedback_fn("* checking mirrors status")
1395
      disk_abort = not WaitForSync(self, iobj, oneshot=True)
1396
    else:
1397
      disk_abort = False
1398

    
1399
    if disk_abort:
1400
      RemoveDisks(self, iobj)
1401
      self.cfg.RemoveInstance(iobj.uuid)
1402
      # Make sure the instance lock gets removed
1403
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
1404
      raise errors.OpExecError("There are some degraded disks for"
1405
                               " this instance")
1406

    
1407
    # instance disks are now active
1408
    iobj.disks_active = True
1409

    
1410
    # Release all node resource locks
1411
    ReleaseLocks(self, locking.LEVEL_NODE_RES)
1412

    
1413
    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
1414
      if self.op.mode == constants.INSTANCE_CREATE:
1415
        if not self.op.no_install:
1416
          pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
1417
                        not self.op.wait_for_sync)
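          # The sync is paused while the OS create scripts run (presumably to
          # keep the resync from competing with the installation for I/O) and
          # resumed again further below.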
1418
          if pause_sync:
1419
            feedback_fn("* pausing disk sync to install instance OS")
1420
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
1421
                                                              (iobj.disks,
1422
                                                               iobj), True)
1423
            for idx, success in enumerate(result.payload):
1424
              if not success:
1425
                logging.warn("pause-sync of instance %s for disk %d failed",
1426
                             self.op.instance_name, idx)
1427

    
1428
          feedback_fn("* running the instance OS create scripts...")
1429
          # FIXME: pass debug option from opcode to backend
1430
          os_add_result = \
1431
            self.rpc.call_instance_os_add(self.pnode.uuid, (iobj, None), False,
1432
                                          self.op.debug_level)
1433
          if pause_sync:
1434
            feedback_fn("* resuming disk sync")
1435
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
1436
                                                              (iobj.disks,
1437
                                                               iobj), False)
1438
            for idx, success in enumerate(result.payload):
1439
              if not success:
1440
                logging.warn("resume-sync of instance %s for disk %d failed",
1441
                             self.op.instance_name, idx)
1442

    
1443
          os_add_result.Raise("Could not add os for instance %s"
1444
                              " on node %s" % (self.op.instance_name,
1445
                                               self.pnode.name))
1446

    
1447
      else:
1448
        if self.op.mode == constants.INSTANCE_IMPORT:
1449
          feedback_fn("* running the instance OS import scripts...")
1450

    
1451
          transfers = []
1452

    
1453
          for idx, image in enumerate(self.src_images):
1454
            if not image:
1455
              continue
1456

    
1457
            # FIXME: pass debug option from opcode to backend
1458
            dt = masterd.instance.DiskTransfer("disk/%s" % idx,
1459
                                               constants.IEIO_FILE, (image, ),
1460
                                               constants.IEIO_SCRIPT,
1461
                                               ((iobj.disks[idx], iobj), idx),
1462
                                               None)
1463
            transfers.append(dt)
1464

    
1465
          import_result = \
1466
            masterd.instance.TransferInstanceData(self, feedback_fn,
1467
                                                  self.op.src_node_uuid,
1468
                                                  self.pnode.uuid,
1469
                                                  self.pnode.secondary_ip,
1470
                                                  self.op.compress,
1471
                                                  iobj, transfers)
1472
          if not compat.all(import_result):
1473
            self.LogWarning("Some disks for instance %s on node %s were not"
1474
                            " imported successfully" % (self.op.instance_name,
1475
                                                        self.pnode.name))
1476

    
1477
          rename_from = self._old_instance_name
1478

    
1479
        elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
1480
          feedback_fn("* preparing remote import...")
1481
          # The source cluster will stop the instance before attempting to make
1482
          # a connection. In some cases stopping an instance can take a long
1483
          # time, hence the shutdown timeout is added to the connection
1484
          # timeout.
1485
          connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
1486
                             self.op.source_shutdown_timeout)
1487
          timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
1488

    
1489
          assert iobj.primary_node == self.pnode.uuid
1490
          disk_results = \
1491
            masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
1492
                                          self.source_x509_ca,
1493
                                          self._cds, self.op.compress, timeouts)
1494
          if not compat.all(disk_results):
1495
            # TODO: Should the instance still be started, even if some disks
1496
            # failed to import (valid for local imports, too)?
1497
            self.LogWarning("Some disks for instance %s on node %s were not"
1498
                            " imported successfully" % (self.op.instance_name,
1499
                                                        self.pnode.name))
1500

    
1501
          rename_from = self.source_instance_name
1502

    
1503
        else:
1504
          # also checked in the prereq part
1505
          raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
1506
                                       % self.op.mode)
1507

    
1508
        # Run rename script on newly imported instance
1509
        assert iobj.name == self.op.instance_name
1510
        feedback_fn("Running rename script for %s" % self.op.instance_name)
1511
        result = self.rpc.call_instance_run_rename(self.pnode.uuid, iobj,
1512
                                                   rename_from,
1513
                                                   self.op.debug_level)
1514
        result.Warn("Failed to run rename script for %s on node %s" %
1515
                    (self.op.instance_name, self.pnode.name), self.LogWarning)
1516

    
1517
    assert not self.owned_locks(locking.LEVEL_NODE_RES)
1518

    
1519
    if self.op.start:
1520
      iobj.admin_state = constants.ADMINST_UP
1521
      self.cfg.Update(iobj, feedback_fn)
1522
      logging.info("Starting instance %s on node %s", self.op.instance_name,
1523
                   self.pnode.name)
1524
      feedback_fn("* starting instance...")
1525
      result = self.rpc.call_instance_start(self.pnode.uuid, (iobj, None, None),
1526
                                            False, self.op.reason)
1527
      result.Raise("Could not start instance")
1528

    
1529
    return list(iobj.all_nodes)
1530

    
1531

    
1532
class LUInstanceRename(LogicalUnit):
1533
  """Rename an instance.
1534

1535
  """
1536
  HPATH = "instance-rename"
1537
  HTYPE = constants.HTYPE_INSTANCE
1538

    
1539
  def CheckArguments(self):
1540
    """Check arguments.
1541

1542
    """
1543
    if self.op.ip_check and not self.op.name_check:
1544
      # TODO: make the ip check more flexible and not depend on the name check
1545
      raise errors.OpPrereqError("IP address check requires a name check",
1546
                                 errors.ECODE_INVAL)
1547

    
1548
  def BuildHooksEnv(self):
1549
    """Build hooks env.
1550

1551
    This runs on master, primary and secondary nodes of the instance.
1552

1553
    """
1554
    env = BuildInstanceHookEnvByObject(self, self.instance)
1555
    env["INSTANCE_NEW_NAME"] = self.op.new_name
1556
    return env
1557

    
1558
  def BuildHooksNodes(self):
1559
    """Build hooks nodes.
1560

1561
    """
1562
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
1563
    return (nl, nl)
1564

    
1565
  def CheckPrereq(self):
1566
    """Check prerequisites.
1567

1568
    This checks that the instance is in the cluster and is not running.
1569

1570
    """
1571
    (self.op.instance_uuid, self.op.instance_name) = \
1572
      ExpandInstanceUuidAndName(self.cfg, self.op.instance_uuid,
1573
                                self.op.instance_name)
1574
    instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1575
    assert instance is not None
1576

    
1577
    # It should actually not happen that an instance is running with a disabled
1578
    # disk template, but in case it does, the renaming of file-based instances
1579
    # will fail horribly. Thus, we test it before.
1580
    if (instance.disk_template in constants.DTS_FILEBASED and
1581
        self.op.new_name != instance.name):
1582
      CheckDiskTemplateEnabled(self.cfg.GetClusterInfo(),
1583
                               instance.disk_template)
1584

    
1585
    CheckNodeOnline(self, instance.primary_node)
1586
    CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
1587
                       msg="cannot rename")
1588
    self.instance = instance
1589

    
1590
    new_name = self.op.new_name
1591
    if self.op.name_check:
1592
      hostname = _CheckHostnameSane(self, new_name)
1593
      new_name = self.op.new_name = hostname.name
1594
      if (self.op.ip_check and
1595
          netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
1596
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
1597
                                   (hostname.ip, new_name),
1598
                                   errors.ECODE_NOTUNIQUE)
1599

    
1600
    instance_names = [inst.name for
1601
                      inst in self.cfg.GetAllInstancesInfo().values()]
1602
    if new_name in instance_names and new_name != instance.name:
1603
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
1604
                                 new_name, errors.ECODE_EXISTS)
1605

    
1606
  def Exec(self, feedback_fn):
1607
    """Rename the instance.
1608

1609
    """
1610
    old_name = self.instance.name
1611

    
1612
    rename_file_storage = False
1613
    if (self.instance.disk_template in (constants.DT_FILE,
1614
                                        constants.DT_SHARED_FILE) and
1615
        self.op.new_name != self.instance.name):
1616
      old_file_storage_dir = os.path.dirname(
1617
                               self.instance.disks[0].logical_id[1])
1618
      rename_file_storage = True
1619

    
1620
    self.cfg.RenameInstance(self.instance.uuid, self.op.new_name)
1621
    # Change the instance lock. This is definitely safe while we hold the BGL.
1622
    # Otherwise the new lock would have to be added in acquired mode.
1623
    assert self.REQ_BGL
1624
    assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER)
1625
    self.glm.remove(locking.LEVEL_INSTANCE, old_name)
1626
    self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
1627

    
1628
    # re-read the instance from the configuration after rename
1629
    renamed_inst = self.cfg.GetInstanceInfo(self.instance.uuid)
1630

    
1631
    if rename_file_storage:
1632
      new_file_storage_dir = os.path.dirname(
1633
                               renamed_inst.disks[0].logical_id[1])
1634
      result = self.rpc.call_file_storage_dir_rename(renamed_inst.primary_node,
1635
                                                     old_file_storage_dir,
1636
                                                     new_file_storage_dir)
1637
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
1638
                   " (but the instance has been renamed in Ganeti)" %
1639
                   (self.cfg.GetNodeName(renamed_inst.primary_node),
1640
                    old_file_storage_dir, new_file_storage_dir))
1641

    
1642
    StartInstanceDisks(self, renamed_inst, None)
1643
    # update info on disks
1644
    info = GetInstanceInfoText(renamed_inst)
1645
    for (idx, disk) in enumerate(renamed_inst.disks):
1646
      for node_uuid in renamed_inst.all_nodes:
1647
        result = self.rpc.call_blockdev_setinfo(node_uuid,
1648
                                                (disk, renamed_inst), info)
1649
        result.Warn("Error setting info on node %s for disk %s" %
1650
                    (self.cfg.GetNodeName(node_uuid), idx), self.LogWarning)
1651
    try:
1652
      result = self.rpc.call_instance_run_rename(renamed_inst.primary_node,
1653
                                                 renamed_inst, old_name,
1654
                                                 self.op.debug_level)
1655
      result.Warn("Could not run OS rename script for instance %s on node %s"
1656
                  " (but the instance has been renamed in Ganeti)" %
1657
                  (renamed_inst.name,
1658
                   self.cfg.GetNodeName(renamed_inst.primary_node)),
1659
                  self.LogWarning)
1660
    finally:
1661
      ShutdownInstanceDisks(self, renamed_inst)
1662

    
1663
    return renamed_inst.name
1664

    
1665

    
1666
class LUInstanceRemove(LogicalUnit):
1667
  """Remove an instance.
1668

1669
  """
1670
  HPATH = "instance-remove"
1671
  HTYPE = constants.HTYPE_INSTANCE
1672
  REQ_BGL = False
1673

    
1674
  def ExpandNames(self):
1675
    self._ExpandAndLockInstance()
1676
    self.needed_locks[locking.LEVEL_NODE] = []
1677
    self.needed_locks[locking.LEVEL_NODE_RES] = []
1678
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
1679

    
1680
  def DeclareLocks(self, level):
1681
    if level == locking.LEVEL_NODE:
1682
      self._LockInstancesNodes()
1683
    elif level == locking.LEVEL_NODE_RES:
1684
      # Copy node locks
1685
      self.needed_locks[locking.LEVEL_NODE_RES] = \
1686
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1687

    
1688
  def BuildHooksEnv(self):
1689
    """Build hooks env.
1690

1691
    This runs on master, primary and secondary nodes of the instance.
1692

1693
    """
1694
    env = BuildInstanceHookEnvByObject(self, self.instance)
1695
    env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
1696
    return env
1697

    
1698
  def BuildHooksNodes(self):
1699
    """Build hooks nodes.
1700

1701
    """
1702
    nl = [self.cfg.GetMasterNode()]
1703
    nl_post = list(self.instance.all_nodes) + nl
1704
    return (nl, nl_post)
1705

    
1706
  def CheckPrereq(self):
1707
    """Check prerequisites.
1708

1709
    This checks that the instance is in the cluster.
1710

1711
    """
1712
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1713
    assert self.instance is not None, \
1714
      "Cannot retrieve locked instance %s" % self.op.instance_name
1715

    
1716
  def Exec(self, feedback_fn):
1717
    """Remove the instance.
1718

1719
    """
1720
    logging.info("Shutting down instance %s on node %s", self.instance.name,
1721
                 self.cfg.GetNodeName(self.instance.primary_node))
1722

    
1723
    result = self.rpc.call_instance_shutdown(self.instance.primary_node,
1724
                                             self.instance,
1725
                                             self.op.shutdown_timeout,
1726
                                             self.op.reason)
1727
    if self.op.ignore_failures:
1728
      result.Warn("Warning: can't shutdown instance", feedback_fn)
1729
    else:
1730
      result.Raise("Could not shutdown instance %s on node %s" %
1731
                   (self.instance.name,
1732
                    self.cfg.GetNodeName(self.instance.primary_node)))
1733

    
1734
    assert (self.owned_locks(locking.LEVEL_NODE) ==
1735
            self.owned_locks(locking.LEVEL_NODE_RES))
1736
    assert not (set(self.instance.all_nodes) -
1737
                self.owned_locks(locking.LEVEL_NODE)), \
1738
      "Not owning correct locks"
1739

    
1740
    RemoveInstance(self, feedback_fn, self.instance, self.op.ignore_failures)
1741

    
1742

    
1743
class LUInstanceMove(LogicalUnit):
1744
  """Move an instance by data-copying.
1745

1746
  """
1747
  HPATH = "instance-move"
1748
  HTYPE = constants.HTYPE_INSTANCE
1749
  REQ_BGL = False
1750

    
1751
  def ExpandNames(self):
1752
    self._ExpandAndLockInstance()
1753
    (self.op.target_node_uuid, self.op.target_node) = \
1754
      ExpandNodeUuidAndName(self.cfg, self.op.target_node_uuid,
1755
                            self.op.target_node)
1756
    self.needed_locks[locking.LEVEL_NODE] = [self.op.target_node_uuid]
1757
    self.needed_locks[locking.LEVEL_NODE_RES] = []
1758
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
1759

    
1760
  def DeclareLocks(self, level):
1761
    if level == locking.LEVEL_NODE:
1762
      self._LockInstancesNodes(primary_only=True)
1763
    elif level == locking.LEVEL_NODE_RES:
1764
      # Copy node locks
1765
      self.needed_locks[locking.LEVEL_NODE_RES] = \
1766
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1767

    
1768
  def BuildHooksEnv(self):
1769
    """Build hooks env.
1770

1771
    This runs on master, primary and target nodes of the instance.
1772

1773
    """
1774
    env = {
1775
      "TARGET_NODE": self.op.target_node,
1776
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
1777
      }
1778
    env.update(BuildInstanceHookEnvByObject(self, self.instance))
1779
    return env
1780

    
1781
  def BuildHooksNodes(self):
1782
    """Build hooks nodes.
1783

1784
    """
1785
    nl = [
1786
      self.cfg.GetMasterNode(),
1787
      self.instance.primary_node,
1788
      self.op.target_node_uuid,
1789
      ]
1790
    return (nl, nl)
1791

    
1792
  def CheckPrereq(self):
1793
    """Check prerequisites.
1794

1795
    This checks that the instance is in the cluster.
1796

1797
    """
1798
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1799
    assert self.instance is not None, \
1800
      "Cannot retrieve locked instance %s" % self.op.instance_name
1801

    
1802
    if self.instance.disk_template not in constants.DTS_COPYABLE:
1803
      raise errors.OpPrereqError("Disk template %s not suitable for copying" %
1804
                                 self.instance.disk_template,
1805
                                 errors.ECODE_STATE)
1806

    
1807
    target_node = self.cfg.GetNodeInfo(self.op.target_node_uuid)
1808
    assert target_node is not None, \
1809
      "Cannot retrieve locked node %s" % self.op.target_node
1810

    
1811
    self.target_node_uuid = target_node.uuid
1812
    if target_node.uuid == self.instance.primary_node:
1813
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
1814
                                 (self.instance.name, target_node.name),
1815
                                 errors.ECODE_STATE)
1816

    
1817
    cluster = self.cfg.GetClusterInfo()
1818
    bep = cluster.FillBE(self.instance)
1819

    
1820
    for idx, dsk in enumerate(self.instance.disks):
1821
      if dsk.dev_type not in (constants.DT_PLAIN, constants.DT_FILE,
1822
                              constants.DT_SHARED_FILE, constants.DT_GLUSTER):
1823
        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
1824
                                   " cannot copy" % idx, errors.ECODE_STATE)
1825

    
1826
    CheckNodeOnline(self, target_node.uuid)
1827
    CheckNodeNotDrained(self, target_node.uuid)
1828
    CheckNodeVmCapable(self, target_node.uuid)
1829
    group_info = self.cfg.GetNodeGroup(target_node.group)
1830
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
1831
    CheckTargetNodeIPolicy(self, ipolicy, self.instance, target_node, self.cfg,
1832
                           ignore=self.op.ignore_ipolicy)
1833

    
1834
    if self.instance.admin_state == constants.ADMINST_UP:
1835
      # check memory requirements on the target node
1836
      CheckNodeFreeMemory(
1837
          self, target_node.uuid, "failing over instance %s" %
1838
          self.instance.name, bep[constants.BE_MAXMEM],
1839
          self.instance.hypervisor,
1840
          cluster.hvparams[self.instance.hypervisor])
1841
    else:
1842
      self.LogInfo("Not checking memory on the secondary node as"
1843
                   " instance will not be started")
1844

    
1845
    # check bridge existence
1846
    CheckInstanceBridgesExist(self, self.instance, node_uuid=target_node.uuid)
1847

    
1848
  def Exec(self, feedback_fn):
1849
    """Move an instance.
1850

1851
    The move is done by shutting it down on its present node, copying
1852
    the data over (slow) and starting it on the new node.
1853

1854
    """
1855
    source_node = self.cfg.GetNodeInfo(self.instance.primary_node)
1856
    target_node = self.cfg.GetNodeInfo(self.target_node_uuid)
1857

    
1858
    self.LogInfo("Shutting down instance %s on source node %s",
1859
                 self.instance.name, source_node.name)
1860

    
1861
    assert (self.owned_locks(locking.LEVEL_NODE) ==
1862
            self.owned_locks(locking.LEVEL_NODE_RES))
1863

    
1864
    result = self.rpc.call_instance_shutdown(source_node.uuid, self.instance,
1865
                                             self.op.shutdown_timeout,
1866
                                             self.op.reason)
1867
    if self.op.ignore_consistency:
1868
      result.Warn("Could not shutdown instance %s on node %s. Proceeding"
1869
                  " anyway. Please make sure node %s is down. Error details" %
1870
                  (self.instance.name, source_node.name, source_node.name),
1871
                  self.LogWarning)
1872
    else:
1873
      result.Raise("Could not shutdown instance %s on node %s" %
1874
                   (self.instance.name, source_node.name))
1875

    
1876
    # create the target disks
1877
    try:
1878
      CreateDisks(self, self.instance, target_node_uuid=target_node.uuid)
1879
    except errors.OpExecError:
1880
      self.LogWarning("Device creation failed")
1881
      self.cfg.ReleaseDRBDMinors(self.instance.uuid)
1882
      raise
1883

    
1884
    errs = []
1885
    transfers = []
1886
    # activate, get path, create transfer jobs
1887
    for idx, disk in enumerate(self.instance.disks):
1888
      # FIXME: pass debug option from opcode to backend
1889
      dt = masterd.instance.DiskTransfer("disk/%s" % idx,
1890
                                         constants.IEIO_RAW_DISK,
1891
                                         (disk, self.instance),
1892
                                         constants.IEIO_RAW_DISK,
1893
                                         (disk, self.instance),
1894
                                         None)
1895
      transfers.append(dt)
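      # Each disk is streamed raw (IEIO_RAW_DISK on both ends) from the source
      # node to the target node; no format conversion is involved.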
1896

    
1897
    import_result = \
1898
      masterd.instance.TransferInstanceData(self, feedback_fn,
1899
                                            source_node.uuid,
1900
                                            target_node.uuid,
1901
                                            target_node.secondary_ip,
1902
                                            self.op.compress,
1903
                                            self.instance, transfers)
1904
    if not compat.all(import_result):
1905
      errs.append("Failed to transfer instance data")
1906

    
1907
    if errs:
1908
      self.LogWarning("Some disks failed to copy, aborting")
1909
      try:
1910
        RemoveDisks(self, self.instance, target_node_uuid=target_node.uuid)
1911
      finally:
1912
        self.cfg.ReleaseDRBDMinors(self.instance.uuid)
1913
        raise errors.OpExecError("Errors during disk copy: %s" %
1914
                                 (",".join(errs),))
1915

    
1916
    self.instance.primary_node = target_node.uuid
1917
    self.cfg.Update(self.instance, feedback_fn)
1918

    
1919
    self.LogInfo("Removing the disks on the original node")
1920
    RemoveDisks(self, self.instance, target_node_uuid=source_node.uuid)
1921

    
1922
    # Only start the instance if it's marked as up
1923
    if self.instance.admin_state == constants.ADMINST_UP:
1924
      self.LogInfo("Starting instance %s on node %s",
1925
                   self.instance.name, target_node.name)
1926

    
1927
      disks_ok, _ = AssembleInstanceDisks(self, self.instance,
1928
                                          ignore_secondaries=True)
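      # ignore_secondaries is safe here: only non-mirrored (copyable) disk
      # templates reach this point, so there are no secondary nodes.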
1929
      if not disks_ok:
1930
        ShutdownInstanceDisks(self, self.instance)
1931
        raise errors.OpExecError("Can't activate the instance's disks")
1932

    
1933
      result = self.rpc.call_instance_start(target_node.uuid,
1934
                                            (self.instance, None, None), False,
1935
                                            self.op.reason)
1936
      msg = result.fail_msg
1937
      if msg:
1938
        ShutdownInstanceDisks(self, self.instance)
1939
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
1940
                                 (self.instance.name, target_node.name, msg))
1941

    
1942

    
1943
class LUInstanceMultiAlloc(NoHooksLU):
1944
  """Allocates multiple instances at the same time.
1945

1946
  """
1947
  REQ_BGL = False
1948

    
1949
  def CheckArguments(self):
1950
    """Check arguments.
1951

1952
    """
1953
    nodes = []
1954
    for inst in self.op.instances:
1955
      if inst.iallocator is not None:
1956
        raise errors.OpPrereqError("iallocator are not allowed to be set on"
1957
                                   " instance objects", errors.ECODE_INVAL)
1958
      nodes.append(bool(inst.pnode))
1959
      if inst.disk_template in constants.DTS_INT_MIRROR:
1960
        nodes.append(bool(inst.snode))
1961

    
1962
    has_nodes = compat.any(nodes)
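    # Either every instance specifies its nodes or none does; the XOR below
    # is true exactly when the two styles are mixed.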
1963
    if compat.all(nodes) ^ has_nodes:
1964
      raise errors.OpPrereqError("There are instance objects providing"
1965
                                 " pnode/snode while others do not",
1966
                                 errors.ECODE_INVAL)
1967

    
1968
    if not has_nodes and self.op.iallocator is None:
1969
      default_iallocator = self.cfg.GetDefaultIAllocator()
1970
      if default_iallocator:
1971
        self.op.iallocator = default_iallocator
1972
      else:
1973
        raise errors.OpPrereqError("No iallocator or nodes on the instances"
1974
                                   " given and no cluster-wide default"
1975
                                   " iallocator found; please specify either"
1976
                                   " an iallocator or nodes on the instances"
1977
                                   " or set a cluster-wide default iallocator",
1978
                                   errors.ECODE_INVAL)
1979

    
1980
    _CheckOpportunisticLocking(self.op)
1981

    
1982
    dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
1983
    if dups:
1984
      raise errors.OpPrereqError("There are duplicate instance names: %s" %
1985
                                 utils.CommaJoin(dups), errors.ECODE_INVAL)
1986

    
1987
  def ExpandNames(self):
1988
    """Calculate the locks.
1989

1990
    """
1991
    self.share_locks = ShareAll()
1992
    self.needed_locks = {
1993
      # iallocator will select nodes and even if no iallocator is used,
1994
      # collisions with LUInstanceCreate should be avoided
1995
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
1996
      }
1997

    
1998
    if self.op.iallocator:
1999
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
2000
      self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET
2001

    
2002
      if self.op.opportunistic_locking:
2003
        self.opportunistic_locks[locking.LEVEL_NODE] = True
2004
    else:
2005
      nodeslist = []
2006
      for inst in self.op.instances:
2007
        (inst.pnode_uuid, inst.pnode) = \
2008
          ExpandNodeUuidAndName(self.cfg, inst.pnode_uuid, inst.pnode)
2009
        nodeslist.append(inst.pnode_uuid)
2010
        if inst.snode is not None:
2011
          (inst.snode_uuid, inst.snode) = \
2012
            ExpandNodeUuidAndName(self.cfg, inst.snode_uuid, inst.snode)
2013
          nodeslist.append(inst.snode_uuid)
2014

    
2015
      self.needed_locks[locking.LEVEL_NODE] = nodeslist
2016
      # Lock resources of instance's primary and secondary nodes (copy to
2017
      # prevent accidental modification)
2018
      self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)
2019

    
2020
  def DeclareLocks(self, level):
2021
    if level == locking.LEVEL_NODE_RES and \
2022
      self.opportunistic_locks[locking.LEVEL_NODE]:
2023
      # Even when using opportunistic locking, we require the same set of
2024
      # NODE_RES locks as we got NODE locks
2025
      self.needed_locks[locking.LEVEL_NODE_RES] = \
2026
        self.owned_locks(locking.LEVEL_NODE)
2027

    
2028
  def CheckPrereq(self):
2029
    """Check prerequisite.
2030

2031
    """
2032
    if self.op.iallocator:
2033
      cluster = self.cfg.GetClusterInfo()
2034
      default_vg = self.cfg.GetVGName()
2035
      ec_id = self.proc.GetECId()
2036

    
2037
      if self.op.opportunistic_locking:
2038
        # Only consider nodes for which a lock is held
2039
        node_whitelist = self.cfg.GetNodeNames(
2040
                           list(self.owned_locks(locking.LEVEL_NODE)))
2041
      else:
2042
        node_whitelist = None
2043

    
2044
      insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
2045
                                           _ComputeNics(op, cluster, None,
2046
                                                        self.cfg, ec_id),
2047
                                           _ComputeFullBeParams(op, cluster),
2048
                                           node_whitelist)
2049
               for op in self.op.instances]
2050

    
2051
      req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
2052
      ial = iallocator.IAllocator(self.cfg, self.rpc, req)
2053

    
2054
      ial.Run(self.op.iallocator)
2055

    
2056
      if not ial.success:
2057
        raise errors.OpPrereqError("Can't compute nodes using"
2058
                                   " iallocator '%s': %s" %
2059
                                   (self.op.iallocator, ial.info),
2060
                                   errors.ECODE_NORES)
2061

    
2062
      self.ia_result = ial.result
2063

    
2064
    if self.op.dry_run:
2065
      self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
2066
        constants.JOB_IDS_KEY: [],
2067
        })
2068

    
2069
  def _ConstructPartialResult(self):
2070
    """Contructs the partial result.
2071

2072
    """
2073
    if self.op.iallocator:
2074
      (allocatable, failed_insts) = self.ia_result
2075
      allocatable_insts = map(compat.fst, allocatable)
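      # Each allocatable entry is an (instance name, nodes) pair, so keep only
      # the names here.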
2076
    else:
2077
      allocatable_insts = [op.instance_name for op in self.op.instances]
2078
      failed_insts = []
2079

    
2080
    return {
2081
      constants.ALLOCATABLE_KEY: allocatable_insts,
2082
      constants.FAILED_KEY: failed_insts,
2083
      }
2084

    
2085
  def Exec(self, feedback_fn):
2086
    """Executes the opcode.
2087

2088
    """
2089
    jobs = []
2090
    if self.op.iallocator:
2091
      op2inst = dict((op.instance_name, op) for op in self.op.instances)
2092
      (allocatable, failed) = self.ia_result
2093

    
2094
      for (name, node_names) in allocatable:
2095
        op = op2inst.pop(name)
2096

    
2097
        (op.pnode_uuid, op.pnode) = \
2098
          ExpandNodeUuidAndName(self.cfg, None, node_names[0])
2099
        if len(node_names) > 1:
2100
          (op.snode_uuid, op.snode) = \
2101
            ExpandNodeUuidAndName(self.cfg, None, node_names[1])
2102

    
2103
        jobs.append([op])
2104

    
2105
      missing = set(op2inst.keys()) - set(failed)
      assert not missing, \
        "Iallocator did return incomplete result: %s" % \
        utils.CommaJoin(missing)
2109
    else:
2110
      jobs.extend([op] for op in self.op.instances)
2111

    
2112
    return ResultWithJobs(jobs, **self._ConstructPartialResult())
2113

    
2114

    
2115
class _InstNicModPrivate:
2116
  """Data structure for network interface modifications.
2117

2118
  Used by L{LUInstanceSetParams}.
2119

2120
  """
2121
  def __init__(self):
2122
    self.params = None
2123
    self.filled = None
2124

    
2125

    
2126
def _PrepareContainerMods(mods, private_fn):
2127
  """Prepares a list of container modifications by adding a private data field.
2128

2129
  @type mods: list of tuples; (operation, index, parameters)
2130
  @param mods: List of modifications
2131
  @type private_fn: callable or None
2132
  @param private_fn: Callable for constructing a private data field for a
2133
    modification
2134
  @rtype: list
2135

2136
  """
2137
  if private_fn is None:
2138
    fn = lambda: None
2139
  else:
2140
    fn = private_fn
2141

    
2142
  return [(op, idx, params, fn()) for (op, idx, params) in mods]
2143

    
2144

    
2145
def _CheckNodesPhysicalCPUs(lu, node_uuids, requested, hypervisor_specs):
2146
  """Checks if nodes have enough physical CPUs
2147

2148
  This function checks if all given nodes have the needed number of
2149
  physical CPUs. In case any node has less CPUs or we cannot get the
2150
  information from the node, this function raises an OpPrereqError
2151
  exception.
2152

2153
  @type lu: C{LogicalUnit}
2154
  @param lu: a logical unit from which we get configuration data
2155
  @type node_uuids: C{list}
2156
  @param node_uuids: the list of node UUIDs to check
2157
  @type requested: C{int}
2158
  @param requested: the minimum acceptable number of physical CPUs
2159
  @type hypervisor_specs: list of pairs (string, dict of strings)
2160
  @param hypervisor_specs: list of hypervisor specifications in
2161
      pairs (hypervisor_name, hvparams)
2162
  @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
2163
      or we cannot check the node
2164

2165
  """
2166
  nodeinfo = lu.rpc.call_node_info(node_uuids, None, hypervisor_specs)
2167
  for node_uuid in node_uuids:
2168
    info = nodeinfo[node_uuid]
2169
    node_name = lu.cfg.GetNodeName(node_uuid)
2170
    info.Raise("Cannot get current information from node %s" % node_name,
2171
               prereq=True, ecode=errors.ECODE_ENVIRON)
2172
    (_, _, (hv_info, )) = info.payload
2173
    num_cpus = hv_info.get("cpu_total", None)
2174
    if not isinstance(num_cpus, int):
2175
      raise errors.OpPrereqError("Can't compute the number of physical CPUs"
2176
                                 " on node %s, result was '%s'" %
2177
                                 (node_name, num_cpus), errors.ECODE_ENVIRON)
2178
    if requested > num_cpus:
2179
      raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
2180
                                 "required" % (node_name, num_cpus, requested),
2181
                                 errors.ECODE_NORES)
2182

    
2183

    
2184
def GetItemFromContainer(identifier, kind, container):
2185
  """Return the item refered by the identifier.
2186

2187
  @type identifier: string
2188
  @param identifier: Item index or name or UUID
2189
  @type kind: string
2190
  @param kind: One-word item description
2191
  @type container: list
2192
  @param container: Container to get the item from
2193

2194
  """
2195
  # Index
2196
  try:
2197
    idx = int(identifier)
2198
    if idx == -1:
2199
      # Append
2200
      absidx = len(container) - 1
2201
    elif idx < 0:
2202
      raise IndexError("Not accepting negative indices other than -1")
2203
    elif idx > len(container):
2204
      raise IndexError("Got %s index %s, but there are only %s" %
2205
                       (kind, idx, len(container)))
2206
    else:
2207
      absidx = idx
2208
    return (absidx, container[idx])
2209
  except ValueError:
2210
    pass
2211

    
2212
  for idx, item in enumerate(container):
2213
    if item.uuid == identifier or item.name == identifier:
2214
      return (idx, item)
2215

    
2216
  raise errors.OpPrereqError("Cannot find %s with identifier %s" %
2217
                             (kind, identifier), errors.ECODE_NOENT)
2218

    
2219

    
2220
def _ApplyContainerMods(kind, container, chgdesc, mods,
2221
                        create_fn, modify_fn, remove_fn,
2222
                        post_add_fn=None):
2223
  """Applies descriptions in C{mods} to C{container}.
2224

2225
  @type kind: string
2226
  @param kind: One-word item description
2227
  @type container: list
2228
  @param container: Container to modify
2229
  @type chgdesc: None or list
2230
  @param chgdesc: List of applied changes
2231
  @type mods: list
2232
  @param mods: Modifications as returned by L{_PrepareContainerMods}
2233
  @type create_fn: callable
2234
  @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
2235
    receives absolute item index, parameters and private data object as added
2236
    by L{_PrepareContainerMods}, returns tuple containing new item and changes
2237
    as list
2238
  @type modify_fn: callable
2239
  @param modify_fn: Callback for modifying an existing item
2240
    (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
2241
    and private data object as added by L{_PrepareContainerMods}, returns
2242
    changes as list
2243
  @type remove_fn: callable
2244
  @param remove_fn: Callback on removing item; receives absolute item index,
2245
    item and private data object as added by L{_PrepareContainerMods}
2246
  @type post_add_fn: callable
2247
  @param post_add_fn: Callable for post-processing a newly created item after
2248
    it has been put into the container. It receives the index of the new item
2249
    and the new item as parameters.
2250

2251
  """
2252
  for (op, identifier, params, private) in mods:
2253
    changes = None
2254

    
2255
    if op == constants.DDM_ADD:
2256
      # Calculate where item will be added
2257
      # When adding an item, identifier can only be an index
2258
      try:
2259
        idx = int(identifier)
2260
      except ValueError:
2261
        raise errors.OpPrereqError("Only possitive integer or -1 is accepted as"
2262
                                   " identifier for %s" % constants.DDM_ADD,
2263
                                   errors.ECODE_INVAL)
2264
      if idx == -1:
2265
        addidx = len(container)
2266
      else:
2267
        if idx < 0:
2268
          raise IndexError("Not accepting negative indices other than -1")
2269
        elif idx > len(container):
2270
          raise IndexError("Got %s index %s, but there are only %s" %
2271
                           (kind, idx, len(container)))
2272
        addidx = idx
2273

    
2274
      if create_fn is None:
2275
        item = params
2276
      else:
2277
        (item, changes) = create_fn(addidx, params, private)
2278

    
2279
      if idx == -1:
2280
        container.append(item)
2281
      else:
2282
        assert idx >= 0
2283
        assert idx <= len(container)
2284
        # list.insert does so before the specified index
2285
        container.insert(idx, item)
2286

    
2287
      if post_add_fn is not None:
2288
        post_add_fn(addidx, item)
2289

    
2290
    else:
2291
      # Retrieve existing item
2292
      (absidx, item) = GetItemFromContainer(identifier, kind, container)
2293

    
2294
      if op == constants.DDM_REMOVE:
2295
        assert not params
2296

    
2297
        changes = [("%s/%s" % (kind, absidx), "remove")]
2298

    
2299
        if remove_fn is not None:
2300
          msg = remove_fn(absidx, item, private)
2301
          if msg:
2302
            changes.append(("%s/%s" % (kind, absidx), msg))
2303

    
2304
        assert container[absidx] == item
2305
        del container[absidx]
2306
      elif op == constants.DDM_MODIFY:
2307
        if modify_fn is not None:
2308
          changes = modify_fn(absidx, item, params, private)
2309
      else:
2310
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2311

    
2312
    assert _TApplyContModsCbChanges(changes)
2313

    
2314
    if not (chgdesc is None or changes is None):
2315
      chgdesc.extend(changes)
2316

    
2317

    
2318
def _UpdateIvNames(base_index, disks):
2319
  """Updates the C{iv_name} attribute of disks.
2320

2321
  @type disks: list of L{objects.Disk}
2322

2323
  """
2324
  for (idx, disk) in enumerate(disks):
2325
    disk.iv_name = "disk/%s" % (base_index + idx, )
2326

    
2327

    
2328
class LUInstanceSetParams(LogicalUnit):
2329
  """Modifies an instances's parameters.
2330

2331
  """
2332
  HPATH = "instance-modify"
2333
  HTYPE = constants.HTYPE_INSTANCE
2334
  REQ_BGL = False
2335

    
2336
  @staticmethod
2337
  def _UpgradeDiskNicMods(kind, mods, verify_fn):
2338
    assert ht.TList(mods)
2339
    assert not mods or len(mods[0]) in (2, 3)
2340

    
2341
    if mods and len(mods[0]) == 2:
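      # Legacy format: a list of (op, params) 2-tuples; convert it to the
      # newer (op, identifier, params) form used below.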
2342
      result = []
2343

    
2344
      addremove = 0
2345
      for op, params in mods:
2346
        if op in (constants.DDM_ADD, constants.DDM_REMOVE):
2347
          result.append((op, -1, params))
2348
          addremove += 1
2349

    
2350
          if addremove > 1:
2351
            raise errors.OpPrereqError("Only one %s add or remove operation is"
2352
                                       " supported at a time" % kind,
2353
                                       errors.ECODE_INVAL)
2354
        else:
2355
          result.append((constants.DDM_MODIFY, op, params))
2356

    
2357
      assert verify_fn(result)
2358
    else:
2359
      result = mods
2360

    
2361
    return result
2362

    
2363
  @staticmethod
2364
  def _CheckMods(kind, mods, key_types, item_fn):
2365
    """Ensures requested disk/NIC modifications are valid.
2366

2367
    """
2368
    for (op, _, params) in mods:
2369
      assert ht.TDict(params)
2370

    
2371
      # If 'key_types' is an empty dict, we assume we have an
2372
      # 'ext' template and thus do not ForceDictType
2373
      if key_types:
2374
        utils.ForceDictType(params, key_types)
2375

    
2376
      if op == constants.DDM_REMOVE:
2377
        if params:
2378
          raise errors.OpPrereqError("No settings should be passed when"
2379
                                     " removing a %s" % kind,
2380
                                     errors.ECODE_INVAL)
2381
      elif op in (constants.DDM_ADD, constants.DDM_MODIFY):
2382
        item_fn(op, params)
2383
      else:
2384
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2385

    
2386
  def _VerifyDiskModification(self, op, params, excl_stor):
2387
    """Verifies a disk modification.
2388

2389
    """
2390
    if op == constants.DDM_ADD:
2391
      mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
2392
      if mode not in constants.DISK_ACCESS_SET:
2393
        raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
2394
                                   errors.ECODE_INVAL)
2395

    
2396
      size = params.get(constants.IDISK_SIZE, None)
2397
      if size is None:
2398
        raise errors.OpPrereqError("Required disk parameter '%s' missing" %
2399
                                   constants.IDISK_SIZE, errors.ECODE_INVAL)
2400
      size = int(size)
2401

    
2402
      params[constants.IDISK_SIZE] = size
2403
      name = params.get(constants.IDISK_NAME, None)
2404
      if name is not None and name.lower() == constants.VALUE_NONE:
2405
        params[constants.IDISK_NAME] = None
2406

    
2407
      CheckSpindlesExclusiveStorage(params, excl_stor, True)
2408

    
2409
    elif op == constants.DDM_MODIFY:
2410
      if constants.IDISK_SIZE in params:
2411
        raise errors.OpPrereqError("Disk size change not possible, use"
2412
                                   " grow-disk", errors.ECODE_INVAL)
2413

    
2414
      # Disk modification supports changing only the disk name and mode.
2415
      # Changing arbitrary parameters is allowed only for the ext disk template.
2416
      if self.instance.disk_template != constants.DT_EXT:
2417
        utils.ForceDictType(params, constants.MODIFIABLE_IDISK_PARAMS_TYPES)
2418

    
2419
      name = params.get(constants.IDISK_NAME, None)
2420
      if name is not None and name.lower() == constants.VALUE_NONE:
2421
        params[constants.IDISK_NAME] = None
2422

    
2423
  @staticmethod
2424
  def _VerifyNicModification(op, params):
2425
    """Verifies a network interface modification.
2426

2427
    """
2428
    if op in (constants.DDM_ADD, constants.DDM_MODIFY):
2429
      ip = params.get(constants.INIC_IP, None)
2430
      name = params.get(constants.INIC_NAME, None)
2431
      req_net = params.get(constants.INIC_NETWORK, None)
2432
      link = params.get(constants.NIC_LINK, None)
2433
      mode = params.get(constants.NIC_MODE, None)
2434
      if name is not None and name.lower() == constants.VALUE_NONE:
2435
        params[constants.INIC_NAME] = None
2436
      if req_net is not None:
2437
        if req_net.lower() == constants.VALUE_NONE:
2438
          params[constants.INIC_NETWORK] = None
2439
          req_net = None
2440
        elif link is not None or mode is not None:
2441
          raise errors.OpPrereqError("If network is given"
2442
                                     " mode or link should not",
2443
                                     errors.ECODE_INVAL)
2444

    
2445
      if op == constants.DDM_ADD:
2446
        macaddr = params.get(constants.INIC_MAC, None)
2447
        if macaddr is None:
2448
          params[constants.INIC_MAC] = constants.VALUE_AUTO
2449

    
2450
      if ip is not None:
2451
        if ip.lower() == constants.VALUE_NONE:
2452
          params[constants.INIC_IP] = None
2453
        else:
2454
          if ip.lower() == constants.NIC_IP_POOL:
2455
            if op == constants.DDM_ADD and req_net is None:
2456
              raise errors.OpPrereqError("If ip=pool, parameter network"
2457
                                         " cannot be none",
2458
                                         errors.ECODE_INVAL)
2459
          else:
2460
            if not netutils.IPAddress.IsValid(ip):
2461
              raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
2462
                                         errors.ECODE_INVAL)
2463

    
2464
      if constants.INIC_MAC in params:
2465
        macaddr = params[constants.INIC_MAC]
2466
        if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2467
          macaddr = utils.NormalizeAndValidateMac(macaddr)
2468

    
2469
        if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
2470
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
2471
                                     " modifying an existing NIC",
2472
                                     errors.ECODE_INVAL)
2473

    
2474
  def CheckArguments(self):
2475
    if not (self.op.nics or self.op.disks or self.op.disk_template or
2476
            self.op.hvparams or self.op.beparams or self.op.os_name or
2477
            self.op.osparams or self.op.offline is not None or
2478
            self.op.runtime_mem or self.op.pnode or self.op.osparams_private):
2479
      raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
2480

    
2481
    if self.op.hvparams:
2482
      CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
2483
                           "hypervisor", "instance", "cluster")
2484

    
2485
    self.op.disks = self._UpgradeDiskNicMods(
2486
      "disk", self.op.disks, ht.TSetParamsMods(ht.TIDiskParams))
2487
    self.op.nics = self._UpgradeDiskNicMods(
2488
      "NIC", self.op.nics, ht.TSetParamsMods(ht.TINicParams))
2489

    
2490
    if self.op.disks and self.op.disk_template is not None:
2491
      raise errors.OpPrereqError("Disk template conversion and other disk"
2492
                                 " changes not supported at the same time",
2493
                                 errors.ECODE_INVAL)
2494

    
2495
    if (self.op.disk_template and
2496
        self.op.disk_template in constants.DTS_INT_MIRROR and
2497
        self.op.remote_node is None):
2498
      raise errors.OpPrereqError("Changing the disk template to a mirrored"
2499
                                 " one requires specifying a secondary node",
2500
                                 errors.ECODE_INVAL)
2501

    
2502
    # Check NIC modifications
2503
    self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
2504
                    self._VerifyNicModification)
2505

    
2506
    if self.op.pnode:
2507
      (self.op.pnode_uuid, self.op.pnode) = \
2508
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
2509

    
2510
  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODEGROUP] = []
    # Can't even acquire node locks in shared mode as upcoming changes in
    # Ganeti 2.6 will start to modify the node object on disk conversion
    self.needed_locks[locking.LEVEL_NODE] = []
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
    # Lock the node group to be able to look up the ipolicy
    self.share_locks[locking.LEVEL_NODEGROUP] = 1

  def DeclareLocks(self, level):
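    """Declare the node group and node locks for the given level.

    """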
    if level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
      # Acquire locks for the instance's nodegroups optimistically. Needs
      # to be verified in CheckPrereq
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
        self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
    elif level == locking.LEVEL_NODE:
      self._LockInstancesNodes()
      if self.op.disk_template and self.op.remote_node:
        (self.op.remote_node_uuid, self.op.remote_node) = \
          ExpandNodeUuidAndName(self.cfg, self.op.remote_node_uuid,
                                self.op.remote_node)
        self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node_uuid)
    elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, primary and secondaries.

    """
    args = {}
    if constants.BE_MINMEM in self.be_new:
      args["minmem"] = self.be_new[constants.BE_MINMEM]
    if constants.BE_MAXMEM in self.be_new:
      args["maxmem"] = self.be_new[constants.BE_MAXMEM]
    if constants.BE_VCPUS in self.be_new:
      args["vcpus"] = self.be_new[constants.BE_VCPUS]
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
    # information at all.

    if self._new_nics is not None:
      nics = []

      for nic in self._new_nics:
        n = copy.deepcopy(nic)
        nicparams = self.cluster.SimpleFillNIC(n.nicparams)
        n.nicparams = nicparams
        nics.append(NICToTuple(self, n))

      args["nics"] = nics

    env = BuildInstanceHookEnvByObject(self, self.instance, override=args)
    if self.op.disk_template:
      env["NEW_DISK_TEMPLATE"] = self.op.disk_template
    if self.op.runtime_mem:
      env["RUNTIME_MEMORY"] = self.op.runtime_mem

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)

  def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
                              old_params, cluster, pnode_uuid):
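    """Prepare the modification of a single NIC.

    Computes and validates the new NIC parameters, checks the bridge on
    the primary node for bridged NICs, and handles generation and
    reservation of MAC and IP addresses; the results are stored in
    C{private}.

    """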
    update_params_dict = dict([(key, params[key])
2586
                               for key in constants.NICS_PARAMETERS
2587
                               if key in params])
2588

    
2589
    req_link = update_params_dict.get(constants.NIC_LINK, None)
2590
    req_mode = update_params_dict.get(constants.NIC_MODE, None)
2591

    
2592
    new_net_uuid = None
2593
    new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid)
2594
    if new_net_uuid_or_name:
2595
      new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name)
2596
      new_net_obj = self.cfg.GetNetwork(new_net_uuid)
2597

    
2598
    if old_net_uuid:
2599
      old_net_obj = self.cfg.GetNetwork(old_net_uuid)
2600

    
2601
    if new_net_uuid:
2602
      netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode_uuid)
2603
      if not netparams:
2604
        raise errors.OpPrereqError("No netparams found for the network"
2605
                                   " %s, probably not connected" %
2606
                                   new_net_obj.name, errors.ECODE_INVAL)
2607
      new_params = dict(netparams)
2608
    else:
2609
      new_params = GetUpdatedParams(old_params, update_params_dict)
2610

    
2611
    utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)
2612

    
2613
    new_filled_params = cluster.SimpleFillNIC(new_params)
2614
    objects.NIC.CheckParameterSyntax(new_filled_params)
2615

    
2616
    new_mode = new_filled_params[constants.NIC_MODE]
2617
    if new_mode == constants.NIC_MODE_BRIDGED:
2618
      bridge = new_filled_params[constants.NIC_LINK]
2619
      msg = self.rpc.call_bridges_exist(pnode_uuid, [bridge]).fail_msg
2620
      if msg:
2621
        msg = "Error checking bridges on node '%s': %s" % \
2622
                (self.cfg.GetNodeName(pnode_uuid), msg)
2623
        if self.op.force:
2624
          self.warn.append(msg)
2625
        else:
2626
          raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
2627

    
2628
    elif new_mode == constants.NIC_MODE_ROUTED:
2629
      ip = params.get(constants.INIC_IP, old_ip)
2630
      if ip is None:
2631
        raise errors.OpPrereqError("Cannot set the NIC IP address to None"
2632
                                   " on a routed NIC", errors.ECODE_INVAL)
2633

    
2634
    elif new_mode == constants.NIC_MODE_OVS:
2635
      # TODO: check OVS link
2636
      self.LogInfo("OVS links are currently not checked for correctness")
2637

    
2638
    if constants.INIC_MAC in params:
2639
      mac = params[constants.INIC_MAC]
2640
      if mac is None:
2641
        raise errors.OpPrereqError("Cannot unset the NIC MAC address",
2642
                                   errors.ECODE_INVAL)
2643
      elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2644
        # otherwise generate the MAC address
2645
        params[constants.INIC_MAC] = \
2646
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
2647
      else:
2648
        # or validate/reserve the current one
2649
        try:
2650
          self.cfg.ReserveMAC(mac, self.proc.GetECId())
2651
        except errors.ReservationError:
2652
          raise errors.OpPrereqError("MAC address '%s' already in use"
2653
                                     " in cluster" % mac,
2654
                                     errors.ECODE_NOTUNIQUE)
2655
    elif new_net_uuid != old_net_uuid:
2656

    
2657
      def get_net_prefix(net_uuid):
2658
        mac_prefix = None
2659
        if net_uuid:
2660
          nobj = self.cfg.GetNetwork(net_uuid)
2661
          mac_prefix = nobj.mac_prefix
2662

    
2663
        return mac_prefix
2664

    
2665
      new_prefix = get_net_prefix(new_net_uuid)
2666
      old_prefix = get_net_prefix(old_net_uuid)
2667
      if old_prefix != new_prefix:
2668
        params[constants.INIC_MAC] = \
2669
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
2670

    
2671
    # if there is a change in (ip, network) tuple
2672
    new_ip = params.get(constants.INIC_IP, old_ip)
2673
    if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
2674
      if new_ip:
2675
        # if IP is pool then require a network and generate one IP
2676
        if new_ip.lower() == constants.NIC_IP_POOL:
2677
          if new_net_uuid:
2678
            try:
2679
              new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
2680
            except errors.ReservationError:
2681
              raise errors.OpPrereqError("Unable to get a free IP"
2682
                                         " from the address pool",
2683
                                         errors.ECODE_STATE)
2684
            self.LogInfo("Chose IP %s from network %s",
2685
                         new_ip,
2686
                         new_net_obj.name)
2687
            params[constants.INIC_IP] = new_ip
2688
          else:
2689
            raise errors.OpPrereqError("ip=pool, but no network found",
2690
                                       errors.ECODE_INVAL)
2691
        # Reserve the new IP in the new network, if any
2692
        elif new_net_uuid:
2693
          try:
2694
            self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId(),
2695
                               check=self.op.conflicts_check)
2696
            self.LogInfo("Reserving IP %s in network %s",
2697
                         new_ip, new_net_obj.name)
2698
          except errors.ReservationError:
2699
            raise errors.OpPrereqError("IP %s not available in network %s" %
2700
                                       (new_ip, new_net_obj.name),
2701
                                       errors.ECODE_NOTUNIQUE)
2702
        # new network is None so check if new IP is a conflicting IP
2703
        elif self.op.conflicts_check:
2704
          _CheckForConflictingIp(self, new_ip, pnode_uuid)
2705

    
2706
      # release old IP if old network is not None
2707
      if old_ip and old_net_uuid:
2708
        try:
2709
          self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId())
2710
        except errors.AddressPoolError:
2711
          logging.warning("Release IP %s not contained in network %s",
2712
                          old_ip, old_net_obj.name)
2713

    
2714
    # there are no changes in (ip, network) tuple and old network is not None
2715
    elif (old_net_uuid is not None and
2716
          (req_link is not None or req_mode is not None)):
2717
      raise errors.OpPrereqError("Not allowed to change link or mode of"
2718
                                 " a NIC that is connected to a network",
2719
                                 errors.ECODE_INVAL)
2720

    
2721
    private.params = new_params
2722
    private.filled = new_filled_params
2723

    
2724
  def _PreCheckDiskTemplate(self, pnode_info):
2725
    """CheckPrereq checks related to a new disk template."""
2726
    # Arguments are passed to avoid configuration lookups
2727
    pnode_uuid = self.instance.primary_node
2728
    if self.instance.disk_template == self.op.disk_template:
2729
      raise errors.OpPrereqError("Instance already has disk template %s" %
2730
                                 self.instance.disk_template,
2731
                                 errors.ECODE_INVAL)
2732

    
2733
    if not self.cluster.IsDiskTemplateEnabled(self.op.disk_template):
2734
      raise errors.OpPrereqError("Disk template '%s' is not enabled for this"
2735
                                 " cluster." % self.op.disk_template)
2736

    
2737
    if (self.instance.disk_template,
2738
        self.op.disk_template) not in self._DISK_CONVERSIONS:
2739
      raise errors.OpPrereqError("Unsupported disk template conversion from"
2740
                                 " %s to %s" % (self.instance.disk_template,
2741
                                                self.op.disk_template),
2742
                                 errors.ECODE_INVAL)
2743
    CheckInstanceState(self, self.instance, INSTANCE_DOWN,
2744
                       msg="cannot change disk template")
2745
    if self.op.disk_template in constants.DTS_INT_MIRROR:
2746
      if self.op.remote_node_uuid == pnode_uuid:
2747
        raise errors.OpPrereqError("Given new secondary node %s is the same"
2748
                                   " as the primary node of the instance" %
2749
                                   self.op.remote_node, errors.ECODE_STATE)
2750
      CheckNodeOnline(self, self.op.remote_node_uuid)
2751
      CheckNodeNotDrained(self, self.op.remote_node_uuid)
2752
      # FIXME: here we assume that the old instance type is DT_PLAIN
2753
      assert self.instance.disk_template == constants.DT_PLAIN
2754
      disks = [{constants.IDISK_SIZE: d.size,
2755
                constants.IDISK_VG: d.logical_id[0]}
2756
               for d in self.instance.disks]
2757
      required = ComputeDiskSizePerVG(self.op.disk_template, disks)
2758
      CheckNodesFreeDiskPerVG(self, [self.op.remote_node_uuid], required)
2759

    
2760
      snode_info = self.cfg.GetNodeInfo(self.op.remote_node_uuid)
2761
      snode_group = self.cfg.GetNodeGroup(snode_info.group)
2762
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
2763
                                                              snode_group)
2764
      CheckTargetNodeIPolicy(self, ipolicy, self.instance, snode_info, self.cfg,
2765
                             ignore=self.op.ignore_ipolicy)
2766
      if pnode_info.group != snode_info.group:
2767
        self.LogWarning("The primary and secondary nodes are in two"
2768
                        " different node groups; the disk parameters"
2769
                        " from the first disk's node group will be"
2770
                        " used")
2771

    
2772
    if self.op.disk_template not in constants.DTS_EXCL_STORAGE:
2773
      # Make sure none of the nodes require exclusive storage
2774
      nodes = [pnode_info]
2775
      if self.op.disk_template in constants.DTS_INT_MIRROR:
2776
        assert snode_info
2777
        nodes.append(snode_info)
2778
      has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
2779
      if compat.any(map(has_es, nodes)):
2780
        errmsg = ("Cannot convert disk template from %s to %s when exclusive"
2781
                  " storage is enabled" % (self.instance.disk_template,
2782
                                           self.op.disk_template))
2783
        raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)
2784

    
2785
  def _PreCheckDisks(self, ispec):
2786
    """CheckPrereq checks related to disk changes.
2787

2788
    @type ispec: dict
2789
    @param ispec: instance specs to be updated with the new disks
2790

2791
    """
2792
    self.diskparams = self.cfg.GetInstanceDiskParams(self.instance)
2793

    
2794
    excl_stor = compat.any(
2795
      rpc.GetExclusiveStorageForNodes(self.cfg,
2796
                                      self.instance.all_nodes).values()
2797
      )
2798

    
2799
    # Check disk modifications. This is done here and not in CheckArguments
2800
    # (as with NICs), because we need to know the instance's disk template
2801
    ver_fn = lambda op, par: self._VerifyDiskModification(op, par, excl_stor)
2802
    if self.instance.disk_template == constants.DT_EXT:
2803
      self._CheckMods("disk", self.op.disks, {}, ver_fn)
2804
    else:
2805
      self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
2806
                      ver_fn)
2807

    
2808
    self.diskmod = _PrepareContainerMods(self.op.disks, None)
2809

    
2810
    # Check the validity of the `provider' parameter
2811
    if self.instance.disk_template in constants.DT_EXT:
2812
      for mod in self.diskmod:
2813
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
2814
        if mod[0] == constants.DDM_ADD:
2815
          if ext_provider is None:
2816
            raise errors.OpPrereqError("Instance template is '%s' and parameter"
2817
                                       " '%s' missing, during disk add" %
2818
                                       (constants.DT_EXT,
2819
                                        constants.IDISK_PROVIDER),
2820
                                       errors.ECODE_NOENT)
2821
        elif mod[0] == constants.DDM_MODIFY:
2822
          if ext_provider:
2823
            raise errors.OpPrereqError("Parameter '%s' is invalid during disk"
2824
                                       " modification" %
2825
                                       constants.IDISK_PROVIDER,
2826
                                       errors.ECODE_INVAL)
2827
    else:
2828
      for mod in self.diskmod:
2829
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
2830
        if ext_provider is not None:
2831
          raise errors.OpPrereqError("Parameter '%s' is only valid for"
2832
                                     " instances of type '%s'" %
2833
                                     (constants.IDISK_PROVIDER,
2834
                                      constants.DT_EXT),
2835
                                     errors.ECODE_INVAL)
2836

    
2837
    if not self.op.wait_for_sync and self.instance.disks_active:
2838
      for mod in self.diskmod:
2839
        if mod[0] == constants.DDM_ADD:
2840
          raise errors.OpPrereqError("Can't add a disk to an instance with"
2841
                                     " activated disks and"
2842
                                     " --no-wait-for-sync given.",
2843
                                     errors.ECODE_INVAL)
2844

    
2845
    if self.op.disks and self.instance.disk_template == constants.DT_DISKLESS:
2846
      raise errors.OpPrereqError("Disk operations not supported for"
2847
                                 " diskless instances", errors.ECODE_INVAL)
2848

    
2849
    def _PrepareDiskMod(_, disk, params, __):
2850
      disk.name = params.get(constants.IDISK_NAME, None)
2851

    
2852
    # Verify disk changes (operating on a copy)
2853
    disks = copy.deepcopy(self.instance.disks)
2854
    _ApplyContainerMods("disk", disks, None, self.diskmod, None,
2855
                        _PrepareDiskMod, None)
2856
    utils.ValidateDeviceNames("disk", disks)
2857
    if len(disks) > constants.MAX_DISKS:
2858
      raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
2859
                                 " more" % constants.MAX_DISKS,
2860
                                 errors.ECODE_STATE)
2861
    disk_sizes = [disk.size for disk in self.instance.disks]
2862
    disk_sizes.extend(params["size"] for (op, idx, params, private) in
2863
                      self.diskmod if op == constants.DDM_ADD)
2864
    ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
2865
    ispec[constants.ISPEC_DISK_SIZE] = disk_sizes
2866

    
2867
    if self.op.offline is not None and self.op.offline:
2868
      CheckInstanceState(self, self.instance, CAN_CHANGE_INSTANCE_OFFLINE,
2869
                         msg="can't change to offline")
2870

    
2871
  def CheckPrereq(self):
2872
    """Check prerequisites.
2873

2874
    This only checks the instance list against the existing names.
2875

2876
    """
2877
    assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
2878
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
2879
    self.cluster = self.cfg.GetClusterInfo()
2880
    cluster_hvparams = self.cluster.hvparams[self.instance.hypervisor]
2881

    
2882
    assert self.instance is not None, \
2883
      "Cannot retrieve locked instance %s" % self.op.instance_name
2884

    
2885
    pnode_uuid = self.instance.primary_node
2886

    
2887
    self.warn = []
2888

    
2889
    if (self.op.pnode_uuid is not None and self.op.pnode_uuid != pnode_uuid and
2890
        not self.op.force):
2891
      # verify that the instance is not up
2892
      instance_info = self.rpc.call_instance_info(
2893
          pnode_uuid, self.instance.name, self.instance.hypervisor,
2894
          cluster_hvparams)
2895
      if instance_info.fail_msg:
2896
        self.warn.append("Can't get instance runtime information: %s" %
2897
                         instance_info.fail_msg)
2898
      elif instance_info.payload:
2899
        raise errors.OpPrereqError("Instance is still running on %s" %
2900
                                   self.cfg.GetNodeName(pnode_uuid),
2901
                                   errors.ECODE_STATE)
2902

    
2903
    assert pnode_uuid in self.owned_locks(locking.LEVEL_NODE)
2904
    node_uuids = list(self.instance.all_nodes)
2905
    pnode_info = self.cfg.GetNodeInfo(pnode_uuid)
2906

    
2907
    #_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
2908
    assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
2909
    group_info = self.cfg.GetNodeGroup(pnode_info.group)
2910

    
2911
    # dictionary with instance information after the modification
2912
    ispec = {}
2913

    
2914
    if self.op.hotplug or self.op.hotplug_if_possible:
2915
      result = self.rpc.call_hotplug_supported(self.instance.primary_node,
2916
                                               self.instance)
2917
      if result.fail_msg:
2918
        if self.op.hotplug:
2919
          result.Raise("Hotplug is not possible: %s" % result.fail_msg,
2920
                       prereq=True)
2921
        else:
2922
          self.LogWarning(result.fail_msg)
2923
          self.op.hotplug = False
2924
          self.LogInfo("Modification will take place without hotplugging.")
2925
      else:
2926
        self.op.hotplug = True
2927

    
2928
    # Prepare NIC modifications
2929
    self.nicmod = _PrepareContainerMods(self.op.nics, _InstNicModPrivate)
2930

    
2931
    # OS change
2932
    if self.op.os_name and not self.op.force:
2933
      CheckNodeHasOS(self, self.instance.primary_node, self.op.os_name,
2934
                     self.op.force_variant)
2935
      instance_os = self.op.os_name
2936
    else:
2937
      instance_os = self.instance.os
2938

    
2939
    assert not (self.op.disk_template and self.op.disks), \
2940
      "Can't modify disk template and apply disk changes at the same time"
2941

    
2942
    if self.op.disk_template:
2943
      self._PreCheckDiskTemplate(pnode_info)
2944

    
2945
    self._PreCheckDisks(ispec)
2946

    
2947
    # hvparams processing
2948
    if self.op.hvparams:
2949
      hv_type = self.instance.hypervisor
2950
      i_hvdict = GetUpdatedParams(self.instance.hvparams, self.op.hvparams)
2951
      utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
2952
      hv_new = self.cluster.SimpleFillHV(hv_type, self.instance.os, i_hvdict)
2953

    
2954
      # local check
2955
      hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
2956
      CheckHVParams(self, node_uuids, self.instance.hypervisor, hv_new)
2957
      self.hv_proposed = self.hv_new = hv_new # the new actual values
2958
      self.hv_inst = i_hvdict # the new dict (without defaults)
2959
    else:
2960
      self.hv_proposed = self.cluster.SimpleFillHV(self.instance.hypervisor,
2961
                                                   self.instance.os,
2962
                                                   self.instance.hvparams)
2963
      self.hv_new = self.hv_inst = {}
2964

    
2965
    # beparams processing
2966
    if self.op.beparams:
2967
      i_bedict = GetUpdatedParams(self.instance.beparams, self.op.beparams,
2968
                                  use_none=True)
2969
      objects.UpgradeBeParams(i_bedict)
2970
      utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
2971
      be_new = self.cluster.SimpleFillBE(i_bedict)
2972
      self.be_proposed = self.be_new = be_new # the new actual values
2973
      self.be_inst = i_bedict # the new dict (without defaults)
2974
    else:
2975
      self.be_new = self.be_inst = {}
2976
      self.be_proposed = self.cluster.SimpleFillBE(self.instance.beparams)
2977
    be_old = self.cluster.FillBE(self.instance)
2978

    
2979
    # CPU param validation -- checking every time a parameter is
2980
    # changed to cover all cases where either CPU mask or vcpus have
2981
    # changed
2982
    if (constants.BE_VCPUS in self.be_proposed and
2983
        constants.HV_CPU_MASK in self.hv_proposed):
2984
      cpu_list = \
2985
        utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
2986
      # Verify mask is consistent with number of vCPUs. Can skip this
2987
      # test if only 1 entry in the CPU mask, which means same mask
2988
      # is applied to all vCPUs.
2989
      if (len(cpu_list) > 1 and
2990
          len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
2991
        raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
2992
                                   " CPU mask [%s]" %
2993
                                   (self.be_proposed[constants.BE_VCPUS],
2994
                                    self.hv_proposed[constants.HV_CPU_MASK]),
2995
                                   errors.ECODE_INVAL)
2996

    
2997
      # Only perform this test if a new CPU mask is given
2998
      if constants.HV_CPU_MASK in self.hv_new:
2999
        # Calculate the largest CPU number requested
3000
        max_requested_cpu = max(map(max, cpu_list))
3001
        # Check that all of the instance's nodes have enough physical CPUs to
3002
        # satisfy the requested CPU mask
3003
        hvspecs = [(self.instance.hypervisor,
3004
                    self.cfg.GetClusterInfo()
3005
                      .hvparams[self.instance.hypervisor])]
3006
        _CheckNodesPhysicalCPUs(self, self.instance.all_nodes,
3007
                                max_requested_cpu + 1,
3008
                                hvspecs)
3009

    
3010
    # osparams processing
3011
    if self.op.osparams or self.op.osparams_private_cluster:
3012
      public_parms = self.op.osparams or {}
3013
      private_parms = self.op.osparams_private_cluster or {}
3014
      dupe_keys = utils.GetRepeatedKeys(public_parms, private_parms)
3015

    
3016
      if dupe_keys:
3017
        raise errors.OpPrereqError("OS parameters repeated multiple times: %s" %
3018
                                   utils.CommaJoin(dupe_keys))
3019

    
3020
      self.os_inst = GetUpdatedParams(self.instance.osparams,
3021
                                      public_parms)
3022
      self.os_inst_private = GetUpdatedParams(self.instance.osparams_private,
3023
                                              private_parms)
3024

    
3025
      CheckOSParams(self, True, node_uuids, instance_os,
3026
                    objects.FillDict(self.os_inst,
3027
                                     self.os_inst_private))
3028

    
3029
    else:
3030
      self.os_inst = {}
3031
      self.os_inst_private = {}
3032

    
3033
    #TODO(dynmem): do the appropriate check involving MINMEM
3034
    if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
3035
        be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
3036
      mem_check_list = [pnode_uuid]
3037
      if be_new[constants.BE_AUTO_BALANCE]:
3038
        # either we changed auto_balance to yes or it was from before
3039
        mem_check_list.extend(self.instance.secondary_nodes)
3040
      instance_info = self.rpc.call_instance_info(
3041
          pnode_uuid, self.instance.name, self.instance.hypervisor,
3042
          cluster_hvparams)
3043
      hvspecs = [(self.instance.hypervisor,
3044
                  cluster_hvparams)]
3045
      nodeinfo = self.rpc.call_node_info(mem_check_list, None,
3046
                                         hvspecs)
3047
      pninfo = nodeinfo[pnode_uuid]
3048
      msg = pninfo.fail_msg
3049
      if msg:
3050
        # Assume the primary node is unreachable and go ahead
3051
        self.warn.append("Can't get info from primary node %s: %s" %
3052
                         (self.cfg.GetNodeName(pnode_uuid), msg))
3053
      else:
3054
        (_, _, (pnhvinfo, )) = pninfo.payload
3055
        if not isinstance(pnhvinfo.get("memory_free", None), int):
3056
          self.warn.append("Node data from primary node %s doesn't contain"
3057
                           " free memory information" %
3058
                           self.cfg.GetNodeName(pnode_uuid))
3059
        elif instance_info.fail_msg:
3060
          self.warn.append("Can't get instance runtime information: %s" %
3061
                           instance_info.fail_msg)
3062
        else:
3063
          if instance_info.payload:
3064
            current_mem = int(instance_info.payload["memory"])
3065
          else:
3066
            # Assume instance not running
3067
            # (there is a slight race condition here, but it's not very
3068
            # probable, and we have no other way to check)
3069
            # TODO: Describe race condition
3070
            current_mem = 0
3071
          #TODO(dynmem): do the appropriate check involving MINMEM
3072
          miss_mem = (be_new[constants.BE_MAXMEM] - current_mem -
3073
                      pnhvinfo["memory_free"])
3074
          if miss_mem > 0:
3075
            raise errors.OpPrereqError("This change will prevent the instance"
3076
                                       " from starting, due to %d MB of memory"
3077
                                       " missing on its primary node" %
3078
                                       miss_mem, errors.ECODE_NORES)
3079

    
3080
      if be_new[constants.BE_AUTO_BALANCE]:
3081
        for node_uuid, nres in nodeinfo.items():
3082
          if node_uuid not in self.instance.secondary_nodes:
3083
            continue
3084
          nres.Raise("Can't get info from secondary node %s" %
3085
                     self.cfg.GetNodeName(node_uuid), prereq=True,
3086
                     ecode=errors.ECODE_STATE)
3087
          (_, _, (nhvinfo, )) = nres.payload
3088
          if not isinstance(nhvinfo.get("memory_free", None), int):
3089
            raise errors.OpPrereqError("Secondary node %s didn't return free"
3090
                                       " memory information" %
3091
                                       self.cfg.GetNodeName(node_uuid),
3092
                                       errors.ECODE_STATE)
3093
          #TODO(dynmem): do the appropriate check involving MINMEM
3094
          elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
3095
            raise errors.OpPrereqError("This change will prevent the instance"
3096
                                       " from failover to its secondary node"
3097
                                       " %s, due to not enough memory" %
3098
                                       self.cfg.GetNodeName(node_uuid),
3099
                                       errors.ECODE_STATE)
3100

    
3101
    if self.op.runtime_mem:
3102
      remote_info = self.rpc.call_instance_info(
3103
         self.instance.primary_node, self.instance.name,
3104
         self.instance.hypervisor,
3105
         cluster_hvparams)
3106
      remote_info.Raise("Error checking node %s" %
3107
                        self.cfg.GetNodeName(self.instance.primary_node))
3108
      if not remote_info.payload: # not running already
3109
        raise errors.OpPrereqError("Instance %s is not running" %
3110
                                   self.instance.name, errors.ECODE_STATE)
3111

    
3112
      current_memory = remote_info.payload["memory"]
3113
      if (not self.op.force and
3114
           (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
3115
            self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
3116
        raise errors.OpPrereqError("Instance %s must have memory between %d"
3117
                                   " and %d MB of memory unless --force is"
3118
                                   " given" %
3119
                                   (self.instance.name,
3120
                                    self.be_proposed[constants.BE_MINMEM],
3121
                                    self.be_proposed[constants.BE_MAXMEM]),
3122
                                   errors.ECODE_INVAL)
3123

    
3124
      delta = self.op.runtime_mem - current_memory
3125
      if delta > 0:
3126
        CheckNodeFreeMemory(
3127
            self, self.instance.primary_node,
3128
            "ballooning memory for instance %s" % self.instance.name, delta,
3129
            self.instance.hypervisor,
3130
            self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])
3131

    
3132
    # make self.cluster visible in the functions below
3133
    cluster = self.cluster
3134

    
3135
    def _PrepareNicCreate(_, params, private):
3136
      self._PrepareNicModification(params, private, None, None,
3137
                                   {}, cluster, pnode_uuid)
3138
      return (None, None)
3139

    
3140
    def _PrepareNicMod(_, nic, params, private):
3141
      self._PrepareNicModification(params, private, nic.ip, nic.network,
3142
                                   nic.nicparams, cluster, pnode_uuid)
3143
      return None
3144

    
3145
    def _PrepareNicRemove(_, params, __):
3146
      ip = params.ip
3147
      net = params.network
3148
      if net is not None and ip is not None:
3149
        self.cfg.ReleaseIp(net, ip, self.proc.GetECId())
3150

    
3151
    # Verify NIC changes (operating on copy)
3152
    nics = self.instance.nics[:]
3153
    _ApplyContainerMods("NIC", nics, None, self.nicmod,
3154
                        _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
3155
    if len(nics) > constants.MAX_NICS:
3156
      raise errors.OpPrereqError("Instance has too many network interfaces"
3157
                                 " (%d), cannot add more" % constants.MAX_NICS,
3158
                                 errors.ECODE_STATE)
3159

    
3160
    # Pre-compute NIC changes (necessary to use result in hooks)
3161
    self._nic_chgdesc = []
3162
    if self.nicmod:
3163
      # Operate on copies as this is still in prereq
3164
      nics = [nic.Copy() for nic in self.instance.nics]
3165
      _ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
3166
                          self._CreateNewNic, self._ApplyNicMods,
3167
                          self._RemoveNic)
3168
      # Verify that NIC names are unique and valid
3169
      utils.ValidateDeviceNames("NIC", nics)
3170
      self._new_nics = nics
3171
      ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
3172
    else:
3173
      self._new_nics = None
3174
      ispec[constants.ISPEC_NIC_COUNT] = len(self.instance.nics)
3175

    
3176
    if not self.op.ignore_ipolicy:
3177
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
3178
                                                              group_info)
3179

    
3180
      # Fill ispec with backend parameters
3181
      ispec[constants.ISPEC_SPINDLE_USE] = \
3182
        self.be_new.get(constants.BE_SPINDLE_USE, None)
3183
      ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS,
3184
                                                         None)
3185

    
3186
      # Copy ispec to verify parameters with min/max values separately
3187
      if self.op.disk_template:
3188
        new_disk_template = self.op.disk_template
3189
      else:
3190
        new_disk_template = self.instance.disk_template
3191
      ispec_max = ispec.copy()
3192
      ispec_max[constants.ISPEC_MEM_SIZE] = \
3193
        self.be_new.get(constants.BE_MAXMEM, None)
3194
      res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,
3195
                                                     new_disk_template)
3196
      ispec_min = ispec.copy()
3197
      ispec_min[constants.ISPEC_MEM_SIZE] = \
3198
        self.be_new.get(constants.BE_MINMEM, None)
3199
      res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,
3200
                                                     new_disk_template)
3201

    
3202
      if (res_max or res_min):
3203
        # FIXME: Improve error message by including information about whether
3204
        # the upper or lower limit of the parameter fails the ipolicy.
3205
        msg = ("Instance allocation to group %s (%s) violates policy: %s" %
3206
               (group_info, group_info.name,
3207
                utils.CommaJoin(set(res_max + res_min))))
3208
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
3209

    
3210
  def _ConvertPlainToDrbd(self, feedback_fn):
3211
    """Converts an instance from plain to drbd.
3212

3213
    """
3214
    feedback_fn("Converting template to drbd")
3215
    pnode_uuid = self.instance.primary_node
3216
    snode_uuid = self.op.remote_node_uuid
3217

    
3218
    assert self.instance.disk_template == constants.DT_PLAIN
3219

    
3220
    # create a fake disk info for _GenerateDiskTemplate
3221
    disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
3222
                  constants.IDISK_VG: d.logical_id[0],
3223
                  constants.IDISK_NAME: d.name}
3224
                 for d in self.instance.disks]
3225
    new_disks = GenerateDiskTemplate(self, self.op.disk_template,
3226
                                     self.instance.uuid, pnode_uuid,
3227
                                     [snode_uuid], disk_info, None, None, 0,
3228
                                     feedback_fn, self.diskparams)
3229
    anno_disks = rpc.AnnotateDiskParams(new_disks, self.diskparams)
3230
    p_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, pnode_uuid)
3231
    s_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, snode_uuid)
3232
    info = GetInstanceInfoText(self.instance)
3233
    feedback_fn("Creating additional volumes...")
3234
    # first, create the missing data and meta devices
3235
    for disk in anno_disks:
3236
      # unfortunately this is... not too nice
3237
      CreateSingleBlockDev(self, pnode_uuid, self.instance, disk.children[1],
3238
                           info, True, p_excl_stor)
3239
      for child in disk.children:
3240
        CreateSingleBlockDev(self, snode_uuid, self.instance, child, info, True,
3241
                             s_excl_stor)
3242
    # at this stage, all new LVs have been created, we can rename the
3243
    # old ones
3244
    feedback_fn("Renaming original volumes...")
3245
    rename_list = [(o, n.children[0].logical_id)
3246
                   for (o, n) in zip(self.instance.disks, new_disks)]
3247
    result = self.rpc.call_blockdev_rename(pnode_uuid, rename_list)
3248
    result.Raise("Failed to rename original LVs")
3249

    
3250
    feedback_fn("Initializing DRBD devices...")
3251
    # all child devices are in place, we can now create the DRBD devices
3252
    try:
3253
      for disk in anno_disks:
3254
        for (node_uuid, excl_stor) in [(pnode_uuid, p_excl_stor),
3255
                                       (snode_uuid, s_excl_stor)]:
3256
          f_create = node_uuid == pnode_uuid
3257
          CreateSingleBlockDev(self, node_uuid, self.instance, disk, info,
3258
                               f_create, excl_stor)
3259
    except errors.GenericError, e:
3260
      feedback_fn("Initializing of DRBD devices failed;"
3261
                  " renaming back original volumes...")
3262
      rename_back_list = [(n.children[0], o.logical_id)
3263
                          for (n, o) in zip(new_disks, self.instance.disks)]
3264
      result = self.rpc.call_blockdev_rename(pnode_uuid, rename_back_list)
3265
      result.Raise("Failed to rename LVs back after error %s" % str(e))
3266
      raise
3267

    
3268
    # at this point, the instance has been modified
3269
    self.instance.disk_template = constants.DT_DRBD8
3270
    self.instance.disks = new_disks
3271
    self.cfg.Update(self.instance, feedback_fn)
3272

    
3273
    # Release node locks while waiting for sync
3274
    ReleaseLocks(self, locking.LEVEL_NODE)
3275

    
3276
    # disks are created, waiting for sync
3277
    disk_abort = not WaitForSync(self, self.instance,
3278
                                 oneshot=not self.op.wait_for_sync)
3279
    if disk_abort:
3280
      raise errors.OpExecError("There are some degraded disks for"
3281
                               " this instance, please cleanup manually")
3282

    
3283
    # Node resource locks will be released by caller
3284

    
3285
  def _ConvertDrbdToPlain(self, feedback_fn):
3286
    """Converts an instance from drbd to plain.
3287

3288
    """
3289
    assert len(self.instance.secondary_nodes) == 1
3290
    assert self.instance.disk_template == constants.DT_DRBD8
3291

    
3292
    pnode_uuid = self.instance.primary_node
3293
    snode_uuid = self.instance.secondary_nodes[0]
3294
    feedback_fn("Converting template to plain")
3295

    
3296
    old_disks = AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
3297
    new_disks = [d.children[0] for d in self.instance.disks]
3298

    
3299
    # copy over size, mode and name
3300
    for parent, child in zip(old_disks, new_disks):
3301
      child.size = parent.size
3302
      child.mode = parent.mode
3303
      child.name = parent.name
3304

    
3305
    # this is a DRBD disk, return its port to the pool
3306
    # NOTE: this must be done right before the call to cfg.Update!
3307
    for disk in old_disks:
3308
      tcp_port = disk.logical_id[2]
3309
      self.cfg.AddTcpUdpPort(tcp_port)
3310

    
3311
    # update instance structure
3312
    self.instance.disks = new_disks
3313
    self.instance.disk_template = constants.DT_PLAIN
3314
    _UpdateIvNames(0, self.instance.disks)
3315
    self.cfg.Update(self.instance, feedback_fn)
3316

    
3317
    # Release locks in case removing disks takes a while
3318
    ReleaseLocks(self, locking.LEVEL_NODE)
3319

    
3320
    feedback_fn("Removing volumes on the secondary node...")
3321
    for disk in old_disks:
3322
      result = self.rpc.call_blockdev_remove(snode_uuid, (disk, self.instance))
3323
      result.Warn("Could not remove block device %s on node %s,"
3324
                  " continuing anyway" %
3325
                  (disk.iv_name, self.cfg.GetNodeName(snode_uuid)),
3326
                  self.LogWarning)
3327

    
3328
    feedback_fn("Removing unneeded volumes on the primary node...")
3329
    for idx, disk in enumerate(old_disks):
3330
      meta = disk.children[1]
3331
      result = self.rpc.call_blockdev_remove(pnode_uuid, (meta, self.instance))
3332
      result.Warn("Could not remove metadata for disk %d on node %s,"
3333
                  " continuing anyway" %
3334
                  (idx, self.cfg.GetNodeName(pnode_uuid)),
3335
                  self.LogWarning)
3336

    
3337
  def _HotplugDevice(self, action, dev_type, device, extra, seq):
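    """Hotplugs a single device on the primary node.

    Failures are only logged as warnings; a short "hotplug:done" or
    "hotplug:failed" message is returned for the change list.

    """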
    self.LogInfo("Trying to hotplug device...")
3339
    msg = "hotplug:"
3340
    result = self.rpc.call_hotplug_device(self.instance.primary_node,
3341
                                          self.instance, action, dev_type,
3342
                                          (device, self.instance),
3343
                                          extra, seq)
3344
    if result.fail_msg:
3345
      self.LogWarning("Could not hotplug device: %s" % result.fail_msg)
3346
      self.LogInfo("Continuing execution..")
3347
      msg += "failed"
3348
    else:
3349
      self.LogInfo("Hotplug done.")
3350
      msg += "done"
3351
    return msg
3352

    
3353
  def _CreateNewDisk(self, idx, params, _):
3354
    """Creates a new disk.
3355

3356
    """
3357
    # add a new disk
3358
    if self.instance.disk_template in constants.DTS_FILEBASED:
3359
      (file_driver, file_path) = self.instance.disks[0].logical_id
3360
      file_path = os.path.dirname(file_path)
3361
    else:
3362
      file_driver = file_path = None
3363

    
3364
    disk = \
3365
      GenerateDiskTemplate(self, self.instance.disk_template,
3366
                           self.instance.uuid, self.instance.primary_node,
3367
                           self.instance.secondary_nodes, [params], file_path,
3368
                           file_driver, idx, self.Log, self.diskparams)[0]
3369

    
3370
    new_disks = CreateDisks(self, self.instance, disks=[disk])
3371

    
3372
    if self.cluster.prealloc_wipe_disks:
3373
      # Wipe new disk
3374
      WipeOrCleanupDisks(self, self.instance,
3375
                         disks=[(idx, disk, 0)],
3376
                         cleanup=new_disks)
3377

    
3378
    changes = [
3379
      ("disk/%d" % idx,
3380
       "add:size=%s,mode=%s" % (disk.size, disk.mode)),
3381
      ]
3382
    if self.op.hotplug:
3383
      result = self.rpc.call_blockdev_assemble(self.instance.primary_node,
3384
                                               (disk, self.instance),
3385
                                               self.instance.name, True, idx)
3386
      if result.fail_msg:
3387
        changes.append(("disk/%d" % idx, "assemble:failed"))
3388
        self.LogWarning("Can't assemble newly created disk %d: %s",
3389
                        idx, result.fail_msg)
3390
      else:
3391
        _, link_name = result.payload
3392
        msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
3393
                                  constants.HOTPLUG_TARGET_DISK,
3394
                                  disk, link_name, idx)
3395
        changes.append(("disk/%d" % idx, msg))
3396

    
3397
    return (disk, changes)
3398

    
3399
  def _PostAddDisk(self, _, disk):
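    """Runs the post-add steps for a newly created disk.

    Waits for the disk to sync and shuts it down again if the instance
    disks are supposed to be inactive.

    """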
    if not WaitForSync(self, self.instance, disks=[disk],
3401
                       oneshot=not self.op.wait_for_sync):
3402
      raise errors.OpExecError("Failed to sync disks of %s" %
3403
                               self.instance.name)
3404

    
3405
    # the disk is active at this point, so deactivate it if the instance disks
3406
    # are supposed to be inactive
3407
    if not self.instance.disks_active:
3408
      ShutdownInstanceDisks(self, self.instance, disks=[disk])
3409

    
3410
  def _ModifyDisk(self, idx, disk, params, _):
3411
    """Modifies a disk.
3412

3413
    """
3414
    changes = []
3415
    if constants.IDISK_MODE in params:
3416
      disk.mode = params.get(constants.IDISK_MODE)
3417
      changes.append(("disk.mode/%d" % idx, disk.mode))
3418

    
3419
    if constants.IDISK_NAME in params:
3420
      disk.name = params.get(constants.IDISK_NAME)
3421
      changes.append(("disk.name/%d" % idx, disk.name))
3422

    
3423
    # Modify arbitrary params in case instance template is ext
3424
    for key, value in params.iteritems():
3425
      if (key not in constants.MODIFIABLE_IDISK_PARAMS and
3426
          self.instance.disk_template == constants.DT_EXT):
3427
        # stolen from GetUpdatedParams: default means reset/delete
3428
        if value.lower() == constants.VALUE_DEFAULT:
3429
          try:
3430
            del disk.params[key]
3431
          except KeyError:
3432
            pass
3433
        else:
3434
          disk.params[key] = value
3435
        changes.append(("disk.params:%s/%d" % (key, idx), value))
3436

    
3437
    return changes
3438

    
3439
  def _RemoveDisk(self, idx, root, _):
3440
    """Removes a disk.
3441

3442
    """
3443
    hotmsg = ""
3444
    if self.op.hotplug:
3445
      hotmsg = self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
3446
                                   constants.HOTPLUG_TARGET_DISK,
3447
                                   root, None, idx)
3448
      ShutdownInstanceDisks(self, self.instance, [root])
3449

    
3450
    (anno_disk,) = AnnotateDiskParams(self.instance, [root], self.cfg)
3451
    for node_uuid, disk in anno_disk.ComputeNodeTree(
3452
                             self.instance.primary_node):
3453
      msg = self.rpc.call_blockdev_remove(node_uuid, (disk, self.instance)) \
3454
              .fail_msg
3455
      if msg:
3456
        self.LogWarning("Could not remove disk/%d on node '%s': %s,"
3457
                        " continuing anyway", idx,
3458
                        self.cfg.GetNodeName(node_uuid), msg)
3459

    
3460
    # if this is a DRBD disk, return its port to the pool
3461
    if root.dev_type in constants.DTS_DRBD:
3462
      self.cfg.AddTcpUdpPort(root.logical_id[2])
3463

    
3464
    return hotmsg
3465

    
3466
  def _CreateNewNic(self, idx, params, private):
3467
    """Creates data structure for a new network interface.
3468

3469
    """
3470
    mac = params[constants.INIC_MAC]
3471
    ip = params.get(constants.INIC_IP, None)
3472
    net = params.get(constants.INIC_NETWORK, None)
3473
    name = params.get(constants.INIC_NAME, None)
3474
    net_uuid = self.cfg.LookupNetwork(net)
3475
    #TODO: not private.filled?? can a nic have no nicparams??
3476
    nicparams = private.filled
3477
    nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name,
3478
                       nicparams=nicparams)
3479
    nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
3480

    
3481
    changes = [
3482
      ("nic.%d" % idx,
3483
       "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
3484
       (mac, ip, private.filled[constants.NIC_MODE],
3485
       private.filled[constants.NIC_LINK], net)),
3486
      ]
3487

    
3488
    if self.op.hotplug:
3489
      msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
3490
                                constants.HOTPLUG_TARGET_NIC,
3491
                                nobj, None, idx)
3492
      changes.append(("nic.%d" % idx, msg))
3493

    
3494
    return (nobj, changes)
3495

    
3496
  def _ApplyNicMods(self, idx, nic, params, private):
3497
    """Modifies a network interface.
3498

3499
    """
3500
    changes = []
3501

    
3502
    for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:
3503
      if key in params:
3504
        changes.append(("nic.%s/%d" % (key, idx), params[key]))
3505
        setattr(nic, key, params[key])
3506

    
3507
    new_net = params.get(constants.INIC_NETWORK, nic.network)
3508
    new_net_uuid = self.cfg.LookupNetwork(new_net)
3509
    if new_net_uuid != nic.network:
3510
      changes.append(("nic.network/%d" % idx, new_net))
3511
      nic.network = new_net_uuid
3512

    
3513
    if private.filled:
3514
      nic.nicparams = private.filled
3515

    
3516
      for (key, val) in nic.nicparams.items():
3517
        changes.append(("nic.%s/%d" % (key, idx), val))
3518

    
3519
    if self.op.hotplug:
3520
      msg = self._HotplugDevice(constants.HOTPLUG_ACTION_MODIFY,
3521
                                constants.HOTPLUG_TARGET_NIC,
3522
                                nic, None, idx)
3523
      changes.append(("nic/%d" % idx, msg))
3524

    
3525
    return changes
3526

    
3527
  def _RemoveNic(self, idx, nic, _):
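    """Removes a NIC, hot-unplugging it first if requested.

    """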
    if self.op.hotplug:
3529
      return self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
3530
                                 constants.HOTPLUG_TARGET_NIC,
3531
                                 nic, None, idx)
3532

    
3533
  def Exec(self, feedback_fn):
3534
    """Modifies an instance.
3535

3536
    All parameters take effect only at the next restart of the instance.
3537

3538
    """
3539
    # Process here the warnings from CheckPrereq, as we don't have a
3540
    # feedback_fn there.
3541
    # TODO: Replace with self.LogWarning
3542
    for warn in self.warn:
3543
      feedback_fn("WARNING: %s" % warn)
3544

    
3545
    assert ((self.op.disk_template is None) ^
3546
            bool(self.owned_locks(locking.LEVEL_NODE_RES))), \
3547
      "Not owning any node resource locks"
3548

    
3549
    result = []
3550

    
3551
    # New primary node
3552
    if self.op.pnode_uuid:
3553
      self.instance.primary_node = self.op.pnode_uuid
3554

    
3555
    # runtime memory
3556
    if self.op.runtime_mem:
3557
      rpcres = self.rpc.call_instance_balloon_memory(self.instance.primary_node,
3558
                                                     self.instance,
3559
                                                     self.op.runtime_mem)
3560
      rpcres.Raise("Cannot modify instance runtime memory")
3561
      result.append(("runtime_memory", self.op.runtime_mem))
3562

    
3563
    # Apply disk changes
3564
    _ApplyContainerMods("disk", self.instance.disks, result, self.diskmod,
3565
                        self._CreateNewDisk, self._ModifyDisk,
3566
                        self._RemoveDisk, post_add_fn=self._PostAddDisk)
3567
    _UpdateIvNames(0, self.instance.disks)
3568

    
3569
    if self.op.disk_template:
3570
      if __debug__:
3571
        check_nodes = set(self.instance.all_nodes)
3572
        if self.op.remote_node_uuid:
3573
          check_nodes.add(self.op.remote_node_uuid)
3574
        for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
3575
          owned = self.owned_locks(level)
3576
          assert not (check_nodes - owned), \
3577
            ("Not owning the correct locks, owning %r, expected at least %r" %
3578
             (owned, check_nodes))
3579

    
3580
      r_shut = ShutdownInstanceDisks(self, self.instance)
3581
      if not r_shut:
3582
        raise errors.OpExecError("Cannot shutdown instance disks, unable to"
3583
                                 " proceed with disk template conversion")
3584
      mode = (self.instance.disk_template, self.op.disk_template)
3585
      try:
3586
        self._DISK_CONVERSIONS[mode](self, feedback_fn)
3587
      except:
3588
        self.cfg.ReleaseDRBDMinors(self.instance.uuid)
3589
        raise
3590
      result.append(("disk_template", self.op.disk_template))
3591

    
3592
      assert self.instance.disk_template == self.op.disk_template, \
3593
        ("Expected disk template '%s', found '%s'" %
3594
         (self.op.disk_template, self.instance.disk_template))
3595

    
3596
    # Release node and resource locks if there are any (they might already have
3597
    # been released during disk conversion)
3598
    ReleaseLocks(self, locking.LEVEL_NODE)
3599
    ReleaseLocks(self, locking.LEVEL_NODE_RES)
3600

    
3601
    # Apply NIC changes
3602
    if self._new_nics is not None:
3603
      self.instance.nics = self._new_nics
3604
      result.extend(self._nic_chgdesc)
3605

    
3606
    # hvparams changes
3607
    if self.op.hvparams:
3608
      self.instance.hvparams = self.hv_inst
3609
      for key, val in self.op.hvparams.iteritems():
3610
        result.append(("hv/%s" % key, val))
3611

    
3612
    # beparams changes
3613
    if self.op.beparams:
3614
      self.instance.beparams = self.be_inst
3615
      for key, val in self.op.beparams.iteritems():
3616
        result.append(("be/%s" % key, val))
3617

    
3618
    # OS change
3619
    if self.op.os_name:
3620
      self.instance.os = self.op.os_name
3621

    
3622
    # osparams changes
3623
    if self.op.osparams:
3624
      self.instance.osparams = self.os_inst
3625
      for key, val in self.op.osparams.iteritems():
3626
        result.append(("os/%s" % key, val))
3627

    
3628
    if self.op.osparams_private:
3629
      self.instance.osparams_private = self.os_inst_private
3630
      for key, val in self.op.osparams_private.iteritems():
3631
        # Show the Private(...) blurb.
3632
        result.append(("os_private/%s" % key, repr(val)))
3633

    
3634
    if self.op.offline is None:
3635
      # Ignore
3636
      pass
3637
    elif self.op.offline:
3638
      # Mark instance as offline
3639
      self.cfg.MarkInstanceOffline(self.instance.uuid)
3640
      result.append(("admin_state", constants.ADMINST_OFFLINE))
3641
    else:
3642
      # Mark instance as online, but stopped
3643
      self.cfg.MarkInstanceDown(self.instance.uuid)
3644
      result.append(("admin_state", constants.ADMINST_DOWN))
3645

    
3646
    self.cfg.Update(self.instance, feedback_fn, self.proc.GetECId())
3647

    
3648
    assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
3649
                self.owned_locks(locking.LEVEL_NODE)), \
3650
      "All node locks should have been released by now"
3651

    
3652
    return result
3653

    
3654
  _DISK_CONVERSIONS = {
3655
    (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
3656
    (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
3657
    }
3658

    
3659

    
3660
class LUInstanceChangeGroup(LogicalUnit):
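  """Moves an instance to another node group.

  """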
  HPATH = "instance-change-group"
3662
  HTYPE = constants.HTYPE_INSTANCE
3663
  REQ_BGL = False
3664

    
3665
  def ExpandNames(self):
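    """Expand names and declare the locks needed for the group change.

    """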
    self.share_locks = ShareAll()
3667

    
3668
    self.needed_locks = {
3669
      locking.LEVEL_NODEGROUP: [],
3670
      locking.LEVEL_NODE: [],
3671
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
3672
      }
3673

    
3674
    self._ExpandAndLockInstance()
3675

    
3676
    if self.op.target_groups:
3677
      self.req_target_uuids = map(self.cfg.LookupNodeGroup,
3678
                                  self.op.target_groups)
3679
    else:
3680
      self.req_target_uuids = None
3681

    
3682
    self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)
3683

    
3684
  def DeclareLocks(self, level):
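    """Declares node group and node locks, depending on the level.

    """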
    if level == locking.LEVEL_NODEGROUP:
3686
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
3687

    
3688
      if self.req_target_uuids:
3689
        lock_groups = set(self.req_target_uuids)
3690

    
3691
        # Lock all groups used by instance optimistically; this requires going
3692
        # via the node before it's locked, requiring verification later on
3693
        instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
3694
        lock_groups.update(instance_groups)
3695
      else:
3696
        # No target groups, need to lock all of them
3697
        lock_groups = locking.ALL_SET
3698

    
3699
      self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups
3700

    
3701
    elif level == locking.LEVEL_NODE:
3702
      if self.req_target_uuids:
3703
        # Lock all nodes used by instances
3704
        self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
3705
        self._LockInstancesNodes()
3706

    
3707
        # Lock all nodes in all potential target groups
3708
        lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
3709
                       self.cfg.GetInstanceNodeGroups(self.op.instance_uuid))
3710
        member_nodes = [node_uuid
3711
                        for group in lock_groups
3712
                        for node_uuid in self.cfg.GetNodeGroup(group).members]
3713
        self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
3714
      else:
3715
        # Lock all nodes as all groups are potential targets
3716
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
3717

    
3718
  def CheckPrereq(self):
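    """Check prerequisites and compute the set of candidate target groups.

    """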
    owned_instance_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))

    assert (self.req_target_uuids is None or
            owned_groups.issuperset(self.req_target_uuids))
    assert owned_instance_names == set([self.op.instance_name])

    # Get instance information
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)

    # Check if node groups for locked instance are still correct
    assert owned_nodes.issuperset(self.instance.all_nodes), \
      ("Instance %s's nodes changed while we kept the lock" %
       self.op.instance_name)

    inst_groups = CheckInstanceNodeGroups(self.cfg, self.op.instance_uuid,
                                          owned_groups)

    if self.req_target_uuids:
      # User requested specific target groups
      self.target_uuids = frozenset(self.req_target_uuids)
    else:
      # All groups except those used by the instance are potential targets
      self.target_uuids = owned_groups - inst_groups

    conflicting_groups = self.target_uuids & inst_groups
    if conflicting_groups:
      raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
                                 " used by the instance '%s'" %
                                 (utils.CommaJoin(conflicting_groups),
                                  self.op.instance_name),
                                 errors.ECODE_INVAL)

    if not self.target_uuids:
      raise errors.OpPrereqError("There are no possible target groups",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    assert self.target_uuids

    env = {
      "TARGET_GROUPS": " ".join(self.target_uuids),
      }

    env.update(BuildInstanceHookEnvByObject(self, self.instance))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()
    return ([mn], [mn])

  def Exec(self, feedback_fn):
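    """Run the iallocator and return the jobs changing the instance's group.

    """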
    instances = list(self.owned_locks(locking.LEVEL_INSTANCE))

    assert instances == [self.op.instance_name], "Instance not locked"

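    # Ask the instance allocator for a solution moving the instance to one
    # of the target groups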
    req = iallocator.IAReqGroupChange(instances=instances,
                                      target_groups=list(self.target_uuids))
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute solution for changing group of"
                                 " instance '%s' using iallocator '%s': %s" %
                                 (self.op.instance_name, self.op.iallocator,
                                  ial.info), errors.ECODE_NORES)

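    # Convert the iallocator result into the jobs performing the actual
    # group change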
    jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False)

    self.LogInfo("Iallocator returned %s job(s) for changing group of"
                 " instance '%s'", len(jobs), self.op.instance_name)

    return ResultWithJobs(jobs)