Statistics
| Branch: | Tag: | Revision:

root / lib / cmdlib / instance.py @ ec3a7362

History | View | Annotate | Download (154.9 kB)

1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Logical units dealing with instances."""
23

    
24
import OpenSSL
25
import copy
26
import logging
27
import os
28

    
29
from ganeti import compat
30
from ganeti import constants
31
from ganeti import errors
32
from ganeti import ht
33
from ganeti import hypervisor
34
from ganeti import locking
35
from ganeti.masterd import iallocator
36
from ganeti import masterd
37
from ganeti import netutils
38
from ganeti import objects
39
from ganeti import pathutils
40
from ganeti import serializer
41
import ganeti.rpc.node as rpc
42
from ganeti import utils
43

    
44
from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs
45

    
46
from ganeti.cmdlib.common import INSTANCE_DOWN, \
47
  INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, CheckNodeOnline, \
48
  ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
49
  LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
50
  IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
51
  AnnotateDiskParams, GetUpdatedParams, ExpandInstanceUuidAndName, \
52
  ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeUuidAndName, \
53
  CheckDiskTemplateEnabled, IsValidDiskAccessModeCombination
54
from ganeti.cmdlib.instance_storage import CreateDisks, \
55
  CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, WaitForSync, \
56
  IsExclusiveStorageEnabledNodeUuid, CreateSingleBlockDev, ComputeDisks, \
57
  CheckRADOSFreeSpace, ComputeDiskSizePerVG, GenerateDiskTemplate, \
58
  StartInstanceDisks, ShutdownInstanceDisks, AssembleInstanceDisks, \
59
  CheckSpindlesExclusiveStorage
60
from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
61
  GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
62
  NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
63
  ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
64
  GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
65
  CheckInstanceBridgesExist, CheckNicsBridgesExist, CheckNodeHasOS
66

    
67
import ganeti.masterd.instance
68

    
69

    
70
#: Type description for changes as returned by L{_ApplyContainerMods}'s
71
#: callbacks
72
_TApplyContModsCbChanges = \
73
  ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
74
    ht.TNonEmptyString,
75
    ht.TAny,
76
    ])))
77

    
78

    
79
def _CheckHostnameSane(lu, name):
80
  """Ensures that a given hostname resolves to a 'sane' name.
81

82
  The given name is required to be a prefix of the resolved hostname,
83
  to prevent accidental mismatches.
84

85
  @param lu: the logical unit on behalf of which we're checking
86
  @param name: the name we should resolve and check
87
  @return: the resolved hostname object
88

89
  """
90
  hostname = netutils.GetHostname(name=name)
91
  if hostname.name != name:
92
    lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name)
93
  if not utils.MatchNameComponent(name, [hostname.name]):
94
    raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
95
                                " same as given hostname '%s'") %
96
                               (hostname.name, name), errors.ECODE_INVAL)
97
  return hostname
98

    
99

    
100
def _CheckOpportunisticLocking(op):
101
  """Generate error if opportunistic locking is not possible.
102

103
  """
104
  if op.opportunistic_locking and not op.iallocator:
105
    raise errors.OpPrereqError("Opportunistic locking is only available in"
106
                               " combination with an instance allocator",
107
                               errors.ECODE_INVAL)
108

    
109

    
110
def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_name_whitelist):
111
  """Wrapper around IAReqInstanceAlloc.
112

113
  @param op: The instance opcode
114
  @param disks: The computed disks
115
  @param nics: The computed nics
116
  @param beparams: The full filled beparams
117
  @param node_name_whitelist: List of nodes which should appear as online to the
118
    allocator (unless the node is already marked offline)
119

120
  @returns: A filled L{iallocator.IAReqInstanceAlloc}
121

122
  """
123
  spindle_use = beparams[constants.BE_SPINDLE_USE]
124
  return iallocator.IAReqInstanceAlloc(name=op.instance_name,
125
                                       disk_template=op.disk_template,
126
                                       tags=op.tags,
127
                                       os=op.os_type,
128
                                       vcpus=beparams[constants.BE_VCPUS],
129
                                       memory=beparams[constants.BE_MAXMEM],
130
                                       spindle_use=spindle_use,
131
                                       disks=disks,
132
                                       nics=[n.ToDict() for n in nics],
133
                                       hypervisor=op.hypervisor,
134
                                       node_whitelist=node_name_whitelist)
135

    
136

    
137
def _ComputeFullBeParams(op, cluster):
138
  """Computes the full beparams.
139

140
  @param op: The instance opcode
141
  @param cluster: The cluster config object
142

143
  @return: The fully filled beparams
144

145
  """
146
  default_beparams = cluster.beparams[constants.PP_DEFAULT]
147
  for param, value in op.beparams.iteritems():
148
    if value == constants.VALUE_AUTO:
149
      op.beparams[param] = default_beparams[param]
150
  objects.UpgradeBeParams(op.beparams)
151
  utils.ForceDictType(op.beparams, constants.BES_PARAMETER_TYPES)
152
  return cluster.SimpleFillBE(op.beparams)
153

    
154

    
155
def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
156
  """Computes the nics.
157

158
  @param op: The instance opcode
159
  @param cluster: Cluster configuration object
160
  @param default_ip: The default ip to assign
161
  @param cfg: An instance of the configuration object
162
  @param ec_id: Execution context ID
163

164
  @returns: The build up nics
165

166
  """
167
  nics = []
168
  for nic in op.nics:
169
    nic_mode_req = nic.get(constants.INIC_MODE, None)
170
    nic_mode = nic_mode_req
171
    if nic_mode is None or nic_mode == constants.VALUE_AUTO:
172
      nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]
173

    
174
    net = nic.get(constants.INIC_NETWORK, None)
175
    link = nic.get(constants.NIC_LINK, None)
176
    ip = nic.get(constants.INIC_IP, None)
177
    vlan = nic.get(constants.INIC_VLAN, None)
178

    
179
    if net is None or net.lower() == constants.VALUE_NONE:
180
      net = None
181
    else:
182
      if nic_mode_req is not None or link is not None:
183
        raise errors.OpPrereqError("If network is given, no mode or link"
184
                                   " is allowed to be passed",
185
                                   errors.ECODE_INVAL)
186

    
187
    # ip validity checks
188
    if ip is None or ip.lower() == constants.VALUE_NONE:
189
      nic_ip = None
190
    elif ip.lower() == constants.VALUE_AUTO:
191
      if not op.name_check:
192
        raise errors.OpPrereqError("IP address set to auto but name checks"
193
                                   " have been skipped",
194
                                   errors.ECODE_INVAL)
195
      nic_ip = default_ip
196
    else:
197
      # We defer pool operations until later, so that the iallocator has
198
      # filled in the instance's node(s) dimara
199
      if ip.lower() == constants.NIC_IP_POOL:
200
        if net is None:
201
          raise errors.OpPrereqError("if ip=pool, parameter network"
202
                                     " must be passed too",
203
                                     errors.ECODE_INVAL)
204

    
205
      elif not netutils.IPAddress.IsValid(ip):
206
        raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
207
                                   errors.ECODE_INVAL)
208

    
209
      nic_ip = ip
210

    
211
    # TODO: check the ip address for uniqueness
212
    if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
213
      raise errors.OpPrereqError("Routed nic mode requires an ip address",
214
                                 errors.ECODE_INVAL)
215

    
216
    # MAC address verification
217
    mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
218
    if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
219
      mac = utils.NormalizeAndValidateMac(mac)
220

    
221
      try:
222
        # TODO: We need to factor this out
223
        cfg.ReserveMAC(mac, ec_id)
224
      except errors.ReservationError:
225
        raise errors.OpPrereqError("MAC address %s already in use"
226
                                   " in cluster" % mac,
227
                                   errors.ECODE_NOTUNIQUE)
228

    
229
    #  Build nic parameters
230
    nicparams = {}
231
    if nic_mode_req:
232
      nicparams[constants.NIC_MODE] = nic_mode
233
    if link:
234
      nicparams[constants.NIC_LINK] = link
235
    if vlan:
236
      nicparams[constants.NIC_VLAN] = vlan
237

    
238
    check_params = cluster.SimpleFillNIC(nicparams)
239
    objects.NIC.CheckParameterSyntax(check_params)
240
    net_uuid = cfg.LookupNetwork(net)
241
    name = nic.get(constants.INIC_NAME, None)
242
    if name is not None and name.lower() == constants.VALUE_NONE:
243
      name = None
244
    nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name,
245
                          network=net_uuid, nicparams=nicparams)
246
    nic_obj.uuid = cfg.GenerateUniqueID(ec_id)
247
    nics.append(nic_obj)
248

    
249
  return nics
250

    
251

    
252
def _CheckForConflictingIp(lu, ip, node_uuid):
253
  """In case of conflicting IP address raise error.
254

255
  @type ip: string
256
  @param ip: IP address
257
  @type node_uuid: string
258
  @param node_uuid: node UUID
259

260
  """
261
  (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node_uuid)
262
  if conf_net is not None:
263
    raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
264
                                " network %s, but the target NIC does not." %
265
                                (ip, conf_net)),
266
                               errors.ECODE_STATE)
267

    
268
  return (None, None)
269

    
270

    
271
def _ComputeIPolicyInstanceSpecViolation(
272
  ipolicy, instance_spec, disk_template,
273
  _compute_fn=ComputeIPolicySpecViolation):
274
  """Compute if instance specs meets the specs of ipolicy.
275

276
  @type ipolicy: dict
277
  @param ipolicy: The ipolicy to verify against
278
  @param instance_spec: dict
279
  @param instance_spec: The instance spec to verify
280
  @type disk_template: string
281
  @param disk_template: the disk template of the instance
282
  @param _compute_fn: The function to verify ipolicy (unittest only)
283
  @see: L{ComputeIPolicySpecViolation}
284

285
  """
286
  mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
287
  cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
288
  disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
289
  disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
290
  nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
291
  spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)
292

    
293
  return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
294
                     disk_sizes, spindle_use, disk_template)
295

    
296

    
297
def _ComputeInstanceCommunicationNIC(instance_name):
298
  """Compute the name of the instance NIC used by instance
299
  communication.
300

301
  With instance communication, a new NIC is added to the instance.
302
  This NIC has a special name that identities it as being part of
303
  instance communication, and not just a normal NIC.  This function
304
  generates the name of the NIC based on a prefix and the instance
305
  name
306

307
  @type instance_name: string
308
  @param instance_name: name of the instance the NIC belongs to
309

310
  @rtype: string
311
  @return: name of the NIC
312

313
  """
314
  return constants.INSTANCE_COMMUNICATION_NIC_PREFIX + instance_name
315

    
316

    
317
class LUInstanceCreate(LogicalUnit):
318
  """Create an instance.
319

320
  """
321
  HPATH = "instance-add"
322
  HTYPE = constants.HTYPE_INSTANCE
323
  REQ_BGL = False
324

    
325
  def _CheckDiskTemplateValid(self):
326
    """Checks validity of disk template.
327

328
    """
329
    cluster = self.cfg.GetClusterInfo()
330
    if self.op.disk_template is None:
331
      # FIXME: It would be better to take the default disk template from the
332
      # ipolicy, but for the ipolicy we need the primary node, which we get from
333
      # the iallocator, which wants the disk template as input. To solve this
334
      # chicken-and-egg problem, it should be possible to specify just a node
335
      # group from the iallocator and take the ipolicy from that.
336
      self.op.disk_template = cluster.enabled_disk_templates[0]
337
    CheckDiskTemplateEnabled(cluster, self.op.disk_template)
338

    
339
  def _CheckDiskArguments(self):
340
    """Checks validity of disk-related arguments.
341

342
    """
343
    # check that disk's names are unique and valid
344
    utils.ValidateDeviceNames("disk", self.op.disks)
345

    
346
    self._CheckDiskTemplateValid()
347

    
348
    # check disks. parameter names and consistent adopt/no-adopt strategy
349
    has_adopt = has_no_adopt = False
350
    for disk in self.op.disks:
351
      if self.op.disk_template != constants.DT_EXT:
352
        utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
353
      if constants.IDISK_ADOPT in disk:
354
        has_adopt = True
355
      else:
356
        has_no_adopt = True
357
    if has_adopt and has_no_adopt:
358
      raise errors.OpPrereqError("Either all disks are adopted or none is",
359
                                 errors.ECODE_INVAL)
360
    if has_adopt:
361
      if self.op.disk_template not in constants.DTS_MAY_ADOPT:
362
        raise errors.OpPrereqError("Disk adoption is not supported for the"
363
                                   " '%s' disk template" %
364
                                   self.op.disk_template,
365
                                   errors.ECODE_INVAL)
366
      if self.op.iallocator is not None:
367
        raise errors.OpPrereqError("Disk adoption not allowed with an"
368
                                   " iallocator script", errors.ECODE_INVAL)
369
      if self.op.mode == constants.INSTANCE_IMPORT:
370
        raise errors.OpPrereqError("Disk adoption not allowed for"
371
                                   " instance import", errors.ECODE_INVAL)
372
    else:
373
      if self.op.disk_template in constants.DTS_MUST_ADOPT:
374
        raise errors.OpPrereqError("Disk template %s requires disk adoption,"
375
                                   " but no 'adopt' parameter given" %
376
                                   self.op.disk_template,
377
                                   errors.ECODE_INVAL)
378

    
379
    self.adopt_disks = has_adopt
380

    
381
  def _CheckVLANArguments(self):
382
    """ Check validity of VLANs if given
383

384
    """
385
    for nic in self.op.nics:
386
      vlan = nic.get(constants.INIC_VLAN, None)
387
      if vlan:
388
        if vlan[0] == ".":
389
          # vlan starting with dot means single untagged vlan,
390
          # might be followed by trunk (:)
391
          if not vlan[1:].isdigit():
392
            vlanlist = vlan[1:].split(':')
393
            for vl in vlanlist:
394
              if not vl.isdigit():
395
                raise errors.OpPrereqError("Specified VLAN parameter is "
396
                                           "invalid : %s" % vlan,
397
                                             errors.ECODE_INVAL)
398
        elif vlan[0] == ":":
399
          # Trunk - tagged only
400
          vlanlist = vlan[1:].split(':')
401
          for vl in vlanlist:
402
            if not vl.isdigit():
403
              raise errors.OpPrereqError("Specified VLAN parameter is invalid"
404
                                           " : %s" % vlan, errors.ECODE_INVAL)
405
        elif vlan.isdigit():
406
          # This is the simplest case. No dots, only single digit
407
          # -> Create untagged access port, dot needs to be added
408
          nic[constants.INIC_VLAN] = "." + vlan
409
        else:
410
          raise errors.OpPrereqError("Specified VLAN parameter is invalid"
411
                                       " : %s" % vlan, errors.ECODE_INVAL)
412

    
413
  def CheckArguments(self):
414
    """Check arguments.
415

416
    """
417
    # do not require name_check to ease forward/backward compatibility
418
    # for tools
419
    if self.op.no_install and self.op.start:
420
      self.LogInfo("No-installation mode selected, disabling startup")
421
      self.op.start = False
422
    # validate/normalize the instance name
423
    self.op.instance_name = \
424
      netutils.Hostname.GetNormalizedName(self.op.instance_name)
425

    
426
    if self.op.ip_check and not self.op.name_check:
427
      # TODO: make the ip check more flexible and not depend on the name check
428
      raise errors.OpPrereqError("Cannot do IP address check without a name"
429
                                 " check", errors.ECODE_INVAL)
430

    
431
    # add NIC for instance communication
432
    if self.op.instance_communication:
433
      nic_name = _ComputeInstanceCommunicationNIC(self.op.instance_name)
434

    
435
      self.op.nics.append({constants.INIC_NAME: nic_name,
436
                           constants.INIC_MAC: constants.VALUE_GENERATE,
437
                           constants.INIC_IP: constants.NIC_IP_POOL,
438
                           constants.INIC_NETWORK:
439
                             self.cfg.GetInstanceCommunicationNetwork()})
440

    
441
    # check nics' parameter names
442
    for nic in self.op.nics:
443
      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
444
    # check that NIC's parameters names are unique and valid
445
    utils.ValidateDeviceNames("NIC", self.op.nics)
446

    
447
    self._CheckVLANArguments()
448

    
449
    self._CheckDiskArguments()
450
    assert self.op.disk_template is not None
451

    
452
    # instance name verification
453
    if self.op.name_check:
454
      self.hostname = _CheckHostnameSane(self, self.op.instance_name)
455
      self.op.instance_name = self.hostname.name
456
      # used in CheckPrereq for ip ping check
457
      self.check_ip = self.hostname.ip
458
    else:
459
      self.check_ip = None
460

    
461
    # file storage checks
462
    if (self.op.file_driver and
463
        not self.op.file_driver in constants.FILE_DRIVER):
464
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
465
                                 self.op.file_driver, errors.ECODE_INVAL)
466

    
467
    # set default file_driver if unset and required
468
    if (not self.op.file_driver and
469
        self.op.disk_template in constants.DTS_FILEBASED):
470
      self.op.file_driver = constants.FD_LOOP
471

    
472
    ### Node/iallocator related checks
473
    CheckIAllocatorOrNode(self, "iallocator", "pnode")
474

    
475
    if self.op.pnode is not None:
476
      if self.op.disk_template in constants.DTS_INT_MIRROR:
477
        if self.op.snode is None:
478
          raise errors.OpPrereqError("The networked disk templates need"
479
                                     " a mirror node", errors.ECODE_INVAL)
480
      elif self.op.snode:
481
        self.LogWarning("Secondary node will be ignored on non-mirrored disk"
482
                        " template")
483
        self.op.snode = None
484

    
485
    _CheckOpportunisticLocking(self.op)
486

    
487
    if self.op.mode == constants.INSTANCE_IMPORT:
488
      # On import force_variant must be True, because if we forced it at
489
      # initial install, our only chance when importing it back is that it
490
      # works again!
491
      self.op.force_variant = True
492

    
493
      if self.op.no_install:
494
        self.LogInfo("No-installation mode has no effect during import")
495

    
496
    elif self.op.mode == constants.INSTANCE_CREATE:
497
      if self.op.os_type is None:
498
        raise errors.OpPrereqError("No guest OS specified",
499
                                   errors.ECODE_INVAL)
500
      if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
501
        raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
502
                                   " installation" % self.op.os_type,
503
                                   errors.ECODE_STATE)
504
    elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
505
      self._cds = GetClusterDomainSecret()
506

    
507
      # Check handshake to ensure both clusters have the same domain secret
508
      src_handshake = self.op.source_handshake
509
      if not src_handshake:
510
        raise errors.OpPrereqError("Missing source handshake",
511
                                   errors.ECODE_INVAL)
512

    
513
      errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
514
                                                           src_handshake)
515
      if errmsg:
516
        raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
517
                                   errors.ECODE_INVAL)
518

    
519
      # Load and check source CA
520
      self.source_x509_ca_pem = self.op.source_x509_ca
521
      if not self.source_x509_ca_pem:
522
        raise errors.OpPrereqError("Missing source X509 CA",
523
                                   errors.ECODE_INVAL)
524

    
525
      try:
526
        (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
527
                                                    self._cds)
528
      except OpenSSL.crypto.Error, err:
529
        raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
530
                                   (err, ), errors.ECODE_INVAL)
531

    
532
      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
533
      if errcode is not None:
534
        raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
535
                                   errors.ECODE_INVAL)
536

    
537
      self.source_x509_ca = cert
538

    
539
      src_instance_name = self.op.source_instance_name
540
      if not src_instance_name:
541
        raise errors.OpPrereqError("Missing source instance name",
542
                                   errors.ECODE_INVAL)
543

    
544
      self.source_instance_name = \
545
        netutils.GetHostname(name=src_instance_name).name
546

    
547
    else:
548
      raise errors.OpPrereqError("Invalid instance creation mode %r" %
549
                                 self.op.mode, errors.ECODE_INVAL)
550

    
551
  def ExpandNames(self):
552
    """ExpandNames for CreateInstance.
553

554
    Figure out the right locks for instance creation.
555

556
    """
557
    self.needed_locks = {}
558

    
559
    # this is just a preventive check, but someone might still add this
560
    # instance in the meantime, and creation will fail at lock-add time
561
    if self.op.instance_name in\
562
      [inst.name for inst in self.cfg.GetAllInstancesInfo().values()]:
563
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
564
                                 self.op.instance_name, errors.ECODE_EXISTS)
565

    
566
    self.add_locks[locking.LEVEL_INSTANCE] = self.op.instance_name
567

    
568
    if self.op.iallocator:
569
      # TODO: Find a solution to not lock all nodes in the cluster, e.g. by
570
      # specifying a group on instance creation and then selecting nodes from
571
      # that group
572
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
573
      self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
574

    
575
      if self.op.opportunistic_locking:
576
        self.opportunistic_locks[locking.LEVEL_NODE] = True
577
    else:
578
      (self.op.pnode_uuid, self.op.pnode) = \
579
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
580
      nodelist = [self.op.pnode_uuid]
581
      if self.op.snode is not None:
582
        (self.op.snode_uuid, self.op.snode) = \
583
          ExpandNodeUuidAndName(self.cfg, self.op.snode_uuid, self.op.snode)
584
        nodelist.append(self.op.snode_uuid)
585
      self.needed_locks[locking.LEVEL_NODE] = nodelist
586

    
587
    # in case of import lock the source node too
588
    if self.op.mode == constants.INSTANCE_IMPORT:
589
      src_node = self.op.src_node
590
      src_path = self.op.src_path
591

    
592
      if src_path is None:
593
        self.op.src_path = src_path = self.op.instance_name
594

    
595
      if src_node is None:
596
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
597
        self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
598
        self.op.src_node = None
599
        if os.path.isabs(src_path):
600
          raise errors.OpPrereqError("Importing an instance from a path"
601
                                     " requires a source node option",
602
                                     errors.ECODE_INVAL)
603
      else:
604
        (self.op.src_node_uuid, self.op.src_node) = (_, src_node) = \
605
          ExpandNodeUuidAndName(self.cfg, self.op.src_node_uuid, src_node)
606
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
607
          self.needed_locks[locking.LEVEL_NODE].append(self.op.src_node_uuid)
608
        if not os.path.isabs(src_path):
609
          self.op.src_path = \
610
            utils.PathJoin(pathutils.EXPORT_DIR, src_path)
611

    
612
    self.needed_locks[locking.LEVEL_NODE_RES] = \
613
      CopyLockList(self.needed_locks[locking.LEVEL_NODE])
614

    
615
    # Optimistically acquire shared group locks (we're reading the
616
    # configuration).  We can't just call GetInstanceNodeGroups, because the
617
    # instance doesn't exist yet. Therefore we lock all node groups of all
618
    # nodes we have.
619
    if self.needed_locks[locking.LEVEL_NODE] == locking.ALL_SET:
620
      # In the case we lock all nodes for opportunistic allocation, we have no
621
      # choice than to lock all groups, because they're allocated before nodes.
622
      # This is sad, but true. At least we release all those we don't need in
623
      # CheckPrereq later.
624
      self.needed_locks[locking.LEVEL_NODEGROUP] = locking.ALL_SET
625
    else:
626
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
627
        list(self.cfg.GetNodeGroupsFromNodes(
628
          self.needed_locks[locking.LEVEL_NODE]))
629
    self.share_locks[locking.LEVEL_NODEGROUP] = 1
630

    
631
  def DeclareLocks(self, level):
632
    if level == locking.LEVEL_NODE_RES and \
633
      self.opportunistic_locks[locking.LEVEL_NODE]:
634
      # Even when using opportunistic locking, we require the same set of
635
      # NODE_RES locks as we got NODE locks
636
      self.needed_locks[locking.LEVEL_NODE_RES] = \
637
        self.owned_locks(locking.LEVEL_NODE)
638

    
639
  def _RunAllocator(self):
640
    """Run the allocator based on input opcode.
641

642
    """
643
    if self.op.opportunistic_locking:
644
      # Only consider nodes for which a lock is held
645
      node_name_whitelist = self.cfg.GetNodeNames(
646
        self.owned_locks(locking.LEVEL_NODE))
647
    else:
648
      node_name_whitelist = None
649

    
650
    req = _CreateInstanceAllocRequest(self.op, self.disks,
651
                                      self.nics, self.be_full,
652
                                      node_name_whitelist)
653
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)
654

    
655
    ial.Run(self.op.iallocator)
656

    
657
    if not ial.success:
658
      # When opportunistic locks are used only a temporary failure is generated
659
      if self.op.opportunistic_locking:
660
        ecode = errors.ECODE_TEMP_NORES
661
      else:
662
        ecode = errors.ECODE_NORES
663

    
664
      raise errors.OpPrereqError("Can't compute nodes using"
665
                                 " iallocator '%s': %s" %
666
                                 (self.op.iallocator, ial.info),
667
                                 ecode)
668

    
669
    (self.op.pnode_uuid, self.op.pnode) = \
670
      ExpandNodeUuidAndName(self.cfg, None, ial.result[0])
671
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
672
                 self.op.instance_name, self.op.iallocator,
673
                 utils.CommaJoin(ial.result))
674

    
675
    assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator"
676

    
677
    if req.RequiredNodes() == 2:
678
      (self.op.snode_uuid, self.op.snode) = \
679
        ExpandNodeUuidAndName(self.cfg, None, ial.result[1])
680

    
681
  def BuildHooksEnv(self):
682
    """Build hooks env.
683

684
    This runs on master, primary and secondary nodes of the instance.
685

686
    """
687
    env = {
688
      "ADD_MODE": self.op.mode,
689
      }
690
    if self.op.mode == constants.INSTANCE_IMPORT:
691
      env["SRC_NODE"] = self.op.src_node
692
      env["SRC_PATH"] = self.op.src_path
693
      env["SRC_IMAGES"] = self.src_images
694

    
695
    env.update(BuildInstanceHookEnv(
696
      name=self.op.instance_name,
697
      primary_node_name=self.op.pnode,
698
      secondary_node_names=self.cfg.GetNodeNames(self.secondaries),
699
      status=self.op.start,
700
      os_type=self.op.os_type,
701
      minmem=self.be_full[constants.BE_MINMEM],
702
      maxmem=self.be_full[constants.BE_MAXMEM],
703
      vcpus=self.be_full[constants.BE_VCPUS],
704
      nics=NICListToTuple(self, self.nics),
705
      disk_template=self.op.disk_template,
706
      disks=[(d[constants.IDISK_NAME], d.get("uuid", ""),
707
              d[constants.IDISK_SIZE], d[constants.IDISK_MODE])
708
             for d in self.disks],
709
      bep=self.be_full,
710
      hvp=self.hv_full,
711
      hypervisor_name=self.op.hypervisor,
712
      tags=self.op.tags,
713
      ))
714

    
715
    return env
716

    
717
  def BuildHooksNodes(self):
718
    """Build hooks nodes.
719

720
    """
721
    nl = [self.cfg.GetMasterNode(), self.op.pnode_uuid] + self.secondaries
722
    return nl, nl
723

    
724
  def _ReadExportInfo(self):
725
    """Reads the export information from disk.
726

727
    It will override the opcode source node and path with the actual
728
    information, if these two were not specified before.
729

730
    @return: the export information
731

732
    """
733
    assert self.op.mode == constants.INSTANCE_IMPORT
734

    
735
    if self.op.src_node_uuid is None:
736
      locked_nodes = self.owned_locks(locking.LEVEL_NODE)
737
      exp_list = self.rpc.call_export_list(locked_nodes)
738
      found = False
739
      for node_uuid in exp_list:
740
        if exp_list[node_uuid].fail_msg:
741
          continue
742
        if self.op.src_path in exp_list[node_uuid].payload:
743
          found = True
744
          self.op.src_node = self.cfg.GetNodeInfo(node_uuid).name
745
          self.op.src_node_uuid = node_uuid
746
          self.op.src_path = utils.PathJoin(pathutils.EXPORT_DIR,
747
                                            self.op.src_path)
748
          break
749
      if not found:
750
        raise errors.OpPrereqError("No export found for relative path %s" %
751
                                   self.op.src_path, errors.ECODE_INVAL)
752

    
753
    CheckNodeOnline(self, self.op.src_node_uuid)
754
    result = self.rpc.call_export_info(self.op.src_node_uuid, self.op.src_path)
755
    result.Raise("No export or invalid export found in dir %s" %
756
                 self.op.src_path)
757

    
758
    export_info = objects.SerializableConfigParser.Loads(str(result.payload))
759
    if not export_info.has_section(constants.INISECT_EXP):
760
      raise errors.ProgrammerError("Corrupted export config",
761
                                   errors.ECODE_ENVIRON)
762

    
763
    ei_version = export_info.get(constants.INISECT_EXP, "version")
764
    if int(ei_version) != constants.EXPORT_VERSION:
765
      raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
766
                                 (ei_version, constants.EXPORT_VERSION),
767
                                 errors.ECODE_ENVIRON)
768
    return export_info
769

    
770
  def _ReadExportParams(self, einfo):
771
    """Use export parameters as defaults.
772

773
    In case the opcode doesn't specify (as in override) some instance
774
    parameters, then try to use them from the export information, if
775
    that declares them.
776

777
    """
778
    self.op.os_type = einfo.get(constants.INISECT_EXP, "os")
779

    
780
    if not self.op.disks:
781
      disks = []
782
      # TODO: import the disk iv_name too
783
      for idx in range(constants.MAX_DISKS):
784
        if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
785
          disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
786
          disk_name = einfo.get(constants.INISECT_INS, "disk%d_name" % idx)
787
          disk = {
788
            constants.IDISK_SIZE: disk_sz,
789
            constants.IDISK_NAME: disk_name
790
            }
791
          disks.append(disk)
792
      self.op.disks = disks
793
      if not disks and self.op.disk_template != constants.DT_DISKLESS:
794
        raise errors.OpPrereqError("No disk info specified and the export"
795
                                   " is missing the disk information",
796
                                   errors.ECODE_INVAL)
797

    
798
    if not self.op.nics:
799
      nics = []
800
      for idx in range(constants.MAX_NICS):
801
        if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
802
          ndict = {}
803
          for name in [constants.INIC_IP,
804
                       constants.INIC_MAC, constants.INIC_NAME]:
805
            nic_param_name = "nic%d_%s" % (idx, name)
806
            if einfo.has_option(constants.INISECT_INS, nic_param_name):
807
              v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
808
              ndict[name] = v
809
          network = einfo.get(constants.INISECT_INS,
810
                              "nic%d_%s" % (idx, constants.INIC_NETWORK))
811
          # in case network is given link and mode are inherited
812
          # from nodegroup's netparams and thus should not be passed here
813
          if network:
814
            ndict[constants.INIC_NETWORK] = network
815
          else:
816
            for name in list(constants.NICS_PARAMETERS):
817
              v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
818
              ndict[name] = v
819
          nics.append(ndict)
820
        else:
821
          break
822
      self.op.nics = nics
823

    
824
    if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
825
      self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()
826

    
827
    if (self.op.hypervisor is None and
828
        einfo.has_option(constants.INISECT_INS, "hypervisor")):
829
      self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")
830

    
831
    if einfo.has_section(constants.INISECT_HYP):
832
      # use the export parameters but do not override the ones
833
      # specified by the user
834
      for name, value in einfo.items(constants.INISECT_HYP):
835
        if name not in self.op.hvparams:
836
          self.op.hvparams[name] = value
837

    
838
    if einfo.has_section(constants.INISECT_BEP):
839
      # use the parameters, without overriding
840
      for name, value in einfo.items(constants.INISECT_BEP):
841
        if name not in self.op.beparams:
842
          self.op.beparams[name] = value
843
        # Compatibility for the old "memory" be param
844
        if name == constants.BE_MEMORY:
845
          if constants.BE_MAXMEM not in self.op.beparams:
846
            self.op.beparams[constants.BE_MAXMEM] = value
847
          if constants.BE_MINMEM not in self.op.beparams:
848
            self.op.beparams[constants.BE_MINMEM] = value
849
    else:
850
      # try to read the parameters old style, from the main section
851
      for name in constants.BES_PARAMETERS:
852
        if (name not in self.op.beparams and
853
            einfo.has_option(constants.INISECT_INS, name)):
854
          self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)
855

    
856
    if einfo.has_section(constants.INISECT_OSP):
857
      # use the parameters, without overriding
858
      for name, value in einfo.items(constants.INISECT_OSP):
859
        if name not in self.op.osparams:
860
          self.op.osparams[name] = value
861

    
862
    if einfo.has_section(constants.INISECT_OSP_PRIVATE):
863
      # use the parameters, without overriding
864
      for name, value in einfo.items(constants.INISECT_OSP_PRIVATE):
865
        if name not in self.op.osparams_private:
866
          self.op.osparams_private[name] = serializer.Private(value, descr=name)
867

    
868
  def _RevertToDefaults(self, cluster):
869
    """Revert the instance parameters to the default values.
870

871
    """
872
    # hvparams
873
    hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
874
    for name in self.op.hvparams.keys():
875
      if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
876
        del self.op.hvparams[name]
877
    # beparams
878
    be_defs = cluster.SimpleFillBE({})
879
    for name in self.op.beparams.keys():
880
      if name in be_defs and be_defs[name] == self.op.beparams[name]:
881
        del self.op.beparams[name]
882
    # nic params
883
    nic_defs = cluster.SimpleFillNIC({})
884
    for nic in self.op.nics:
885
      for name in constants.NICS_PARAMETERS:
886
        if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
887
          del nic[name]
888
    # osparams
889
    os_defs = cluster.SimpleFillOS(self.op.os_type, {})
890
    for name in self.op.osparams.keys():
891
      if name in os_defs and os_defs[name] == self.op.osparams[name]:
892
        del self.op.osparams[name]
893

    
894
    os_defs_ = cluster.SimpleFillOS(self.op.os_type, {},
895
                                    os_params_private={})
896
    for name in self.op.osparams_private.keys():
897
      if name in os_defs_ and os_defs_[name] == self.op.osparams_private[name]:
898
        del self.op.osparams_private[name]
899

    
900
  def _CalculateFileStorageDir(self):
901
    """Calculate final instance file storage dir.
902

903
    """
904
    # file storage dir calculation/check
905
    self.instance_file_storage_dir = None
906
    if self.op.disk_template in constants.DTS_FILEBASED:
907
      # build the full file storage dir path
908
      joinargs = []
909

    
910
      cfg_storage = None
911
      if self.op.disk_template == constants.DT_FILE:
912
        cfg_storage = self.cfg.GetFileStorageDir()
913
      elif self.op.disk_template == constants.DT_SHARED_FILE:
914
        cfg_storage = self.cfg.GetSharedFileStorageDir()
915
      elif self.op.disk_template == constants.DT_GLUSTER:
916
        cfg_storage = self.cfg.GetGlusterStorageDir()
917

    
918
      if not cfg_storage:
919
        raise errors.OpPrereqError(
920
          "Cluster file storage dir for {tpl} storage type not defined".format(
921
            tpl=repr(self.op.disk_template)
922
          ),
923
          errors.ECODE_STATE
924
      )
925

    
926
      joinargs.append(cfg_storage)
927

    
928
      if self.op.file_storage_dir is not None:
929
        joinargs.append(self.op.file_storage_dir)
930

    
931
      if self.op.disk_template != constants.DT_GLUSTER:
932
        joinargs.append(self.op.instance_name)
933

    
934
      if len(joinargs) > 1:
935
        # pylint: disable=W0142
936
        self.instance_file_storage_dir = utils.PathJoin(*joinargs)
937
      else:
938
        self.instance_file_storage_dir = joinargs[0]
939

    
940
  def CheckPrereq(self): # pylint: disable=R0914
941
    """Check prerequisites.
942

943
    """
944
    # Check that the optimistically acquired groups are correct wrt the
945
    # acquired nodes
946
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
947
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
948
    cur_groups = list(self.cfg.GetNodeGroupsFromNodes(owned_nodes))
949
    if not owned_groups.issuperset(cur_groups):
950
      raise errors.OpPrereqError("New instance %s's node groups changed since"
951
                                 " locks were acquired, current groups are"
952
                                 " are '%s', owning groups '%s'; retry the"
953
                                 " operation" %
954
                                 (self.op.instance_name,
955
                                  utils.CommaJoin(cur_groups),
956
                                  utils.CommaJoin(owned_groups)),
957
                                 errors.ECODE_STATE)
958

    
959
    self._CalculateFileStorageDir()
960

    
961
    if self.op.mode == constants.INSTANCE_IMPORT:
962
      export_info = self._ReadExportInfo()
963
      self._ReadExportParams(export_info)
964
      self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
965
    else:
966
      self._old_instance_name = None
967

    
968
    if (not self.cfg.GetVGName() and
969
        self.op.disk_template not in constants.DTS_NOT_LVM):
970
      raise errors.OpPrereqError("Cluster does not support lvm-based"
971
                                 " instances", errors.ECODE_STATE)
972

    
973
    if (self.op.hypervisor is None or
974
        self.op.hypervisor == constants.VALUE_AUTO):
975
      self.op.hypervisor = self.cfg.GetHypervisorType()
976

    
977
    cluster = self.cfg.GetClusterInfo()
978
    enabled_hvs = cluster.enabled_hypervisors
979
    if self.op.hypervisor not in enabled_hvs:
980
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
981
                                 " cluster (%s)" %
982
                                 (self.op.hypervisor, ",".join(enabled_hvs)),
983
                                 errors.ECODE_STATE)
984

    
985
    # Check tag validity
986
    for tag in self.op.tags:
987
      objects.TaggableObject.ValidateTag(tag)
988

    
989
    # check hypervisor parameter syntax (locally)
990
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
991
    filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
992
                                      self.op.hvparams)
993
    hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
994
    hv_type.CheckParameterSyntax(filled_hvp)
995
    self.hv_full = filled_hvp
996
    # check that we don't specify global parameters on an instance
997
    CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
998
                         "instance", "cluster")
999

    
1000
    # fill and remember the beparams dict
1001
    self.be_full = _ComputeFullBeParams(self.op, cluster)
1002

    
1003
    # build os parameters
1004
    if self.op.osparams_private is None:
1005
      self.op.osparams_private = serializer.PrivateDict()
1006
    if self.op.osparams_secret is None:
1007
      self.op.osparams_secret = serializer.PrivateDict()
1008

    
1009
    self.os_full = cluster.SimpleFillOS(
1010
      self.op.os_type,
1011
      self.op.osparams,
1012
      os_params_private=self.op.osparams_private,
1013
      os_params_secret=self.op.osparams_secret
1014
    )
1015

    
1016
    # now that hvp/bep are in final format, let's reset to defaults,
1017
    # if told to do so
1018
    if self.op.identify_defaults:
1019
      self._RevertToDefaults(cluster)
1020

    
1021
    # NIC buildup
1022
    self.nics = _ComputeNics(self.op, cluster, self.check_ip, self.cfg,
1023
                             self.proc.GetECId())
1024

    
1025
    # disk checks/pre-build
1026
    default_vg = self.cfg.GetVGName()
1027
    self.disks = ComputeDisks(self.op, default_vg)
1028

    
1029
    if self.op.mode == constants.INSTANCE_IMPORT:
1030
      disk_images = []
1031
      for idx in range(len(self.disks)):
1032
        option = "disk%d_dump" % idx
1033
        if export_info.has_option(constants.INISECT_INS, option):
1034
          # FIXME: are the old os-es, disk sizes, etc. useful?
1035
          export_name = export_info.get(constants.INISECT_INS, option)
1036
          image = utils.PathJoin(self.op.src_path, export_name)
1037
          disk_images.append(image)
1038
        else:
1039
          disk_images.append(False)
1040

    
1041
      self.src_images = disk_images
1042

    
1043
      if self.op.instance_name == self._old_instance_name:
1044
        for idx, nic in enumerate(self.nics):
1045
          if nic.mac == constants.VALUE_AUTO:
1046
            nic_mac_ini = "nic%d_mac" % idx
1047
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)
1048

    
1049
    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT
1050

    
1051
    # ip ping checks (we use the same ip that was resolved in ExpandNames)
1052
    if self.op.ip_check:
1053
      if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
1054
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
1055
                                   (self.check_ip, self.op.instance_name),
1056
                                   errors.ECODE_NOTUNIQUE)
1057

    
1058
    #### mac address generation
1059
    # By generating here the mac address both the allocator and the hooks get
1060
    # the real final mac address rather than the 'auto' or 'generate' value.
1061
    # There is a race condition between the generation and the instance object
1062
    # creation, which means that we know the mac is valid now, but we're not
1063
    # sure it will be when we actually add the instance. If things go bad
1064
    # adding the instance will abort because of a duplicate mac, and the
1065
    # creation job will fail.
1066
    for nic in self.nics:
1067
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
1068
        nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId())
1069

    
1070
    #### allocator run
1071

    
1072
    if self.op.iallocator is not None:
1073
      self._RunAllocator()
1074

    
1075
    # Release all unneeded node locks
1076
    keep_locks = filter(None, [self.op.pnode_uuid, self.op.snode_uuid,
1077
                               self.op.src_node_uuid])
1078
    ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
1079
    ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
1080
    ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)
1081
    # Release all unneeded group locks
1082
    ReleaseLocks(self, locking.LEVEL_NODEGROUP,
1083
                 keep=self.cfg.GetNodeGroupsFromNodes(keep_locks))
1084

    
1085
    assert (self.owned_locks(locking.LEVEL_NODE) ==
1086
            self.owned_locks(locking.LEVEL_NODE_RES)), \
1087
      "Node locks differ from node resource locks"
1088

    
1089
    #### node related checks
1090

    
1091
    # check primary node
1092
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode_uuid)
1093
    assert self.pnode is not None, \
1094
      "Cannot retrieve locked node %s" % self.op.pnode_uuid
1095
    if pnode.offline:
1096
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
1097
                                 pnode.name, errors.ECODE_STATE)
1098
    if pnode.drained:
1099
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
1100
                                 pnode.name, errors.ECODE_STATE)
1101
    if not pnode.vm_capable:
1102
      raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
1103
                                 " '%s'" % pnode.name, errors.ECODE_STATE)
1104

    
1105
    self.secondaries = []
1106

    
1107
    # Fill in any IPs from IP pools. This must happen here, because we need to
1108
    # know the nic's primary node, as specified by the iallocator
1109
    for idx, nic in enumerate(self.nics):
1110
      net_uuid = nic.network
1111
      if net_uuid is not None:
1112
        nobj = self.cfg.GetNetwork(net_uuid)
1113
        netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.uuid)
1114
        if netparams is None:
1115
          raise errors.OpPrereqError("No netparams found for network"
1116
                                     " %s. Probably not connected to"
1117
                                     " node's %s nodegroup" %
1118
                                     (nobj.name, self.pnode.name),
1119
                                     errors.ECODE_INVAL)
1120
        self.LogInfo("NIC/%d inherits netparams %s" %
1121
                     (idx, netparams.values()))
1122
        nic.nicparams = dict(netparams)
1123
        if nic.ip is not None:
1124
          if nic.ip.lower() == constants.NIC_IP_POOL:
1125
            try:
1126
              nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId())
1127
            except errors.ReservationError:
1128
              raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
1129
                                         " from the address pool" % idx,
1130
                                         errors.ECODE_STATE)
1131
            self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name)
1132
          else:
1133
            try:
1134
              self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId(),
1135
                                 check=self.op.conflicts_check)
1136
            except errors.ReservationError:
1137
              raise errors.OpPrereqError("IP address %s already in use"
1138
                                         " or does not belong to network %s" %
1139
                                         (nic.ip, nobj.name),
1140
                                         errors.ECODE_NOTUNIQUE)
1141

    
1142
      # net is None, ip None or given
1143
      elif self.op.conflicts_check:
1144
        _CheckForConflictingIp(self, nic.ip, self.pnode.uuid)
1145

    
1146
    # mirror node verification
1147
    if self.op.disk_template in constants.DTS_INT_MIRROR:
1148
      if self.op.snode_uuid == pnode.uuid:
1149
        raise errors.OpPrereqError("The secondary node cannot be the"
1150
                                   " primary node", errors.ECODE_INVAL)
1151
      CheckNodeOnline(self, self.op.snode_uuid)
1152
      CheckNodeNotDrained(self, self.op.snode_uuid)
1153
      CheckNodeVmCapable(self, self.op.snode_uuid)
1154
      self.secondaries.append(self.op.snode_uuid)
1155

    
1156
      snode = self.cfg.GetNodeInfo(self.op.snode_uuid)
1157
      if pnode.group != snode.group:
1158
        self.LogWarning("The primary and secondary nodes are in two"
1159
                        " different node groups; the disk parameters"
1160
                        " from the first disk's node group will be"
1161
                        " used")
1162

    
1163
    nodes = [pnode]
1164
    if self.op.disk_template in constants.DTS_INT_MIRROR:
1165
      nodes.append(snode)
1166
    has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
1167
    excl_stor = compat.any(map(has_es, nodes))
1168
    if excl_stor and not self.op.disk_template in constants.DTS_EXCL_STORAGE:
1169
      raise errors.OpPrereqError("Disk template %s not supported with"
1170
                                 " exclusive storage" % self.op.disk_template,
1171
                                 errors.ECODE_STATE)
1172
    for disk in self.disks:
1173
      CheckSpindlesExclusiveStorage(disk, excl_stor, True)
1174

    
1175
    node_uuids = [pnode.uuid] + self.secondaries
1176

    
1177
    if not self.adopt_disks:
1178
      if self.op.disk_template == constants.DT_RBD:
1179
        # _CheckRADOSFreeSpace() is just a placeholder.
1180
        # Any function that checks prerequisites can be placed here.
1181
        # Check if there is enough space on the RADOS cluster.
1182
        CheckRADOSFreeSpace()
1183
      elif self.op.disk_template == constants.DT_EXT:
1184
        # FIXME: Function that checks prereqs if needed
1185
        pass
1186
      elif self.op.disk_template in constants.DTS_LVM:
1187
        # Check lv size requirements, if not adopting
1188
        req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
1189
        CheckNodesFreeDiskPerVG(self, node_uuids, req_sizes)
1190
      else:
1191
        # FIXME: add checks for other, non-adopting, non-lvm disk templates
1192
        pass
1193

    
1194
    elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
1195
      all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
1196
                                disk[constants.IDISK_ADOPT])
1197
                     for disk in self.disks])
1198
      if len(all_lvs) != len(self.disks):
1199
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
1200
                                   errors.ECODE_INVAL)
1201
      for lv_name in all_lvs:
1202
        try:
1203
          # FIXME: lv_name here is "vg/lv" need to ensure that other calls
1204
          # to ReserveLV uses the same syntax
1205
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
1206
        except errors.ReservationError:
1207
          raise errors.OpPrereqError("LV named %s used by another instance" %
1208
                                     lv_name, errors.ECODE_NOTUNIQUE)
1209

    
1210
      vg_names = self.rpc.call_vg_list([pnode.uuid])[pnode.uuid]
1211
      vg_names.Raise("Cannot get VG information from node %s" % pnode.name)
1212

    
1213
      node_lvs = self.rpc.call_lv_list([pnode.uuid],
1214
                                       vg_names.payload.keys())[pnode.uuid]
1215
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
1216
      node_lvs = node_lvs.payload
1217

    
1218
      delta = all_lvs.difference(node_lvs.keys())
1219
      if delta:
1220
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
1221
                                   utils.CommaJoin(delta),
1222
                                   errors.ECODE_INVAL)
1223
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
1224
      if online_lvs:
1225
        raise errors.OpPrereqError("Online logical volumes found, cannot"
1226
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
1227
                                   errors.ECODE_STATE)
1228
      # update the size of disk based on what is found
1229
      for dsk in self.disks:
1230
        dsk[constants.IDISK_SIZE] = \
1231
          int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
1232
                                        dsk[constants.IDISK_ADOPT])][0]))
1233

    
1234
    elif self.op.disk_template == constants.DT_BLOCK:
1235
      # Normalize and de-duplicate device paths
1236
      all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
1237
                       for disk in self.disks])
1238
      if len(all_disks) != len(self.disks):
1239
        raise errors.OpPrereqError("Duplicate disk names given for adoption",
1240
                                   errors.ECODE_INVAL)
1241
      baddisks = [d for d in all_disks
1242
                  if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
1243
      if baddisks:
1244
        raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
1245
                                   " cannot be adopted" %
1246
                                   (utils.CommaJoin(baddisks),
1247
                                    constants.ADOPTABLE_BLOCKDEV_ROOT),
1248
                                   errors.ECODE_INVAL)
1249

    
1250
      node_disks = self.rpc.call_bdev_sizes([pnode.uuid],
1251
                                            list(all_disks))[pnode.uuid]
1252
      node_disks.Raise("Cannot get block device information from node %s" %
1253
                       pnode.name)
1254
      node_disks = node_disks.payload
1255
      delta = all_disks.difference(node_disks.keys())
1256
      if delta:
1257
        raise errors.OpPrereqError("Missing block device(s): %s" %
1258
                                   utils.CommaJoin(delta),
1259
                                   errors.ECODE_INVAL)
1260
      for dsk in self.disks:
1261
        dsk[constants.IDISK_SIZE] = \
1262
          int(float(node_disks[dsk[constants.IDISK_ADOPT]]))
1263

    
1264
    # Check disk access param to be compatible with specified hypervisor
1265
    node_info = self.cfg.GetNodeInfo(self.op.pnode_uuid)
1266
    node_group = self.cfg.GetNodeGroup(node_info.group)
1267
    disk_params = self.cfg.GetGroupDiskParams(node_group)
1268
    access_type = disk_params[self.op.disk_template].get(
1269
      constants.RBD_ACCESS, constants.DISK_KERNELSPACE
1270
    )
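    # Note: access_type here is one of the DISK_ACCESS values (kernelspace or
    # userspace, as far as this check is concerned); the compatibility test
    # below rejects combinations the chosen hypervisor cannot actually use.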
1271

    
1272
    if not IsValidDiskAccessModeCombination(self.op.hypervisor,
1273
                                            self.op.disk_template,
1274
                                            access_type):
1275
      raise errors.OpPrereqError("Selected hypervisor (%s) cannot be"
1276
                                 " used with %s disk access param" %
1277
                                 (self.op.hypervisor, access_type),
1278
                                  errors.ECODE_STATE)
1279

    
1280
    # Verify instance specs
1281
    spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
1282
    ispec = {
1283
      constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
1284
      constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
1285
      constants.ISPEC_DISK_COUNT: len(self.disks),
1286
      constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE]
1287
                                  for disk in self.disks],
1288
      constants.ISPEC_NIC_COUNT: len(self.nics),
1289
      constants.ISPEC_SPINDLE_USE: spindle_use,
1290
      }
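    # Illustrative shape only: for a 2-disk, 2-vCPU instance this could look
    # like {ISPEC_MEM_SIZE: 4096, ISPEC_CPU_COUNT: 2, ISPEC_DISK_COUNT: 2,
    #       ISPEC_DISK_SIZE: [10240, 20480], ISPEC_NIC_COUNT: 1,
    #       ISPEC_SPINDLE_USE: 1} (values are made up).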
1291

    
1292
    group_info = self.cfg.GetNodeGroup(pnode.group)
1293
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
1294
    res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
1295
                                               self.op.disk_template)
1296
    if not self.op.ignore_ipolicy and res:
1297
      msg = ("Instance allocation to group %s (%s) violates policy: %s" %
1298
             (pnode.group, group_info.name, utils.CommaJoin(res)))
1299
      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
1300

    
1301
    CheckHVParams(self, node_uuids, self.op.hypervisor, self.op.hvparams)
1302

    
1303
    CheckNodeHasOS(self, pnode.uuid, self.op.os_type, self.op.force_variant)
1304
    # check OS parameters (remotely)
1305
    CheckOSParams(self, True, node_uuids, self.op.os_type, self.os_full)
1306

    
1307
    CheckNicsBridgesExist(self, self.nics, self.pnode.uuid)
1308

    
1309
    #TODO: _CheckExtParams (remotely)
1310
    # Check parameters for extstorage
1311

    
1312
    # memory check on primary node
1313
    #TODO(dynmem): use MINMEM for checking
1314
    if self.op.start:
1315
      hvfull = objects.FillDict(cluster.hvparams.get(self.op.hypervisor, {}),
1316
                                self.op.hvparams)
1317
      CheckNodeFreeMemory(self, self.pnode.uuid,
1318
                          "creating instance %s" % self.op.instance_name,
1319
                          self.be_full[constants.BE_MAXMEM],
1320
                          self.op.hypervisor, hvfull)
1321

    
1322
    self.dry_run_result = list(node_uuids)
1323

    
1324
  def Exec(self, feedback_fn):
1325
    """Create and add the instance to the cluster.
1326

1327
    """
1328
    assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
1329
                self.owned_locks(locking.LEVEL_NODE)), \
1330
      "Node locks differ from node resource locks"
1331
    assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)
1332

    
1333
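    # Hypervisors in HTS_REQ_PORT (presumably those exposing a VNC-style
    # console endpoint) get a cluster-unique TCP port reserved here.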
    ht_kind = self.op.hypervisor
1334
    if ht_kind in constants.HTS_REQ_PORT:
1335
      network_port = self.cfg.AllocatePort()
1336
    else:
1337
      network_port = None
1338

    
1339
    instance_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
1340

    
1341
    # This is ugly, but we have a chicken-and-egg problem here:
1342
    # We can only take the group disk parameters, as the instance
1343
    # has no disks yet (we are generating them right here).
1344
    nodegroup = self.cfg.GetNodeGroup(self.pnode.group)
1345
    disks = GenerateDiskTemplate(self,
1346
                                 self.op.disk_template,
1347
                                 instance_uuid, self.pnode.uuid,
1348
                                 self.secondaries,
1349
                                 self.disks,
1350
                                 self.instance_file_storage_dir,
1351
                                 self.op.file_driver,
1352
                                 0,
1353
                                 feedback_fn,
1354
                                 self.cfg.GetGroupDiskParams(nodegroup))
1355

    
1356
    iobj = objects.Instance(name=self.op.instance_name,
1357
                            uuid=instance_uuid,
1358
                            os=self.op.os_type,
1359
                            primary_node=self.pnode.uuid,
1360
                            nics=self.nics, disks=disks,
1361
                            disk_template=self.op.disk_template,
1362
                            disks_active=False,
1363
                            admin_state=constants.ADMINST_DOWN,
1364
                            network_port=network_port,
1365
                            beparams=self.op.beparams,
1366
                            hvparams=self.op.hvparams,
1367
                            hypervisor=self.op.hypervisor,
1368
                            osparams=self.op.osparams,
1369
                            osparams_private=self.op.osparams_private,
1370
                            )
1371

    
1372
    if self.op.tags:
1373
      for tag in self.op.tags:
1374
        iobj.AddTag(tag)
1375

    
1376
    if self.adopt_disks:
1377
      if self.op.disk_template == constants.DT_PLAIN:
1378
        # rename LVs to the newly-generated names; we need to construct
1379
        # 'fake' LV disks with the old data, plus the new unique_id
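        # Illustrative example (names made up): an adopted LV "xenvg/data0" is
        # renamed on the primary node to the generated name stored in
        # disks[n].logical_id, e.g. "xenvg/<instance-uuid>.disk0".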
1380
        tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
1381
        rename_to = []
1382
        for t_dsk, a_dsk in zip(tmp_disks, self.disks):
1383
          rename_to.append(t_dsk.logical_id)
1384
          t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
1385
        result = self.rpc.call_blockdev_rename(self.pnode.uuid,
1386
                                               zip(tmp_disks, rename_to))
1387
        result.Raise("Failed to rename adoped LVs")
1388
    else:
1389
      feedback_fn("* creating instance disks...")
1390
      try:
1391
        CreateDisks(self, iobj)
1392
      except errors.OpExecError:
1393
        self.LogWarning("Device creation failed")
1394
        self.cfg.ReleaseDRBDMinors(self.op.instance_name)
1395
        raise
1396

    
1397
    feedback_fn("adding instance %s to cluster config" % self.op.instance_name)
1398

    
1399
    self.cfg.AddInstance(iobj, self.proc.GetECId())
1400

    
1401
    # Declare that we don't want to remove the instance lock anymore, as we've
1402
    # added the instance to the config
1403
    del self.remove_locks[locking.LEVEL_INSTANCE]
1404

    
1405
    if self.op.mode == constants.INSTANCE_IMPORT:
1406
      # Release unused nodes
1407
      ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node_uuid])
1408
    else:
1409
      # Release all nodes
1410
      ReleaseLocks(self, locking.LEVEL_NODE)
1411

    
1412
    disk_abort = False
1413
    if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
1414
      feedback_fn("* wiping instance disks...")
1415
      try:
1416
        WipeDisks(self, iobj)
1417
      except errors.OpExecError, err:
1418
        logging.exception("Wiping disks failed")
1419
        self.LogWarning("Wiping instance disks failed (%s)", err)
1420
        disk_abort = True
1421

    
1422
    if disk_abort:
1423
      # Something is already wrong with the disks, don't do anything else
1424
      pass
1425
    elif self.op.wait_for_sync:
1426
      disk_abort = not WaitForSync(self, iobj)
1427
    elif iobj.disk_template in constants.DTS_INT_MIRROR:
1428
      # make sure the disks are not degraded (still sync-ing is ok)
1429
      feedback_fn("* checking mirrors status")
1430
      disk_abort = not WaitForSync(self, iobj, oneshot=True)
1431
    else:
1432
      disk_abort = False
1433

    
1434
    if disk_abort:
1435
      RemoveDisks(self, iobj)
1436
      self.cfg.RemoveInstance(iobj.uuid)
1437
      # Make sure the instance lock gets removed
1438
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
1439
      raise errors.OpExecError("There are some degraded disks for"
1440
                               " this instance")
1441

    
1442
    # instance disks are now active
1443
    iobj.disks_active = True
1444

    
1445
    # Release all node resource locks
1446
    ReleaseLocks(self, locking.LEVEL_NODE_RES)
1447

    
1448
    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
1449
      if self.op.mode == constants.INSTANCE_CREATE:
1450
        if not self.op.no_install:
1451
          pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
1452
                        not self.op.wait_for_sync)
1453
          if pause_sync:
1454
            feedback_fn("* pausing disk sync to install instance OS")
1455
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
1456
                                                              (iobj.disks,
1457
                                                               iobj), True)
1458
            for idx, success in enumerate(result.payload):
1459
              if not success:
1460
                logging.warn("pause-sync of instance %s for disk %d failed",
1461
                             self.op.instance_name, idx)
1462

    
1463
          feedback_fn("* running the instance OS create scripts...")
1464
          # FIXME: pass debug option from opcode to backend
1465
          os_add_result = \
1466
            self.rpc.call_instance_os_add(self.pnode.uuid,
1467
                                          (iobj, self.op.osparams_secret),
1468
                                          False,
1469
                                          self.op.debug_level)
1470
          if pause_sync:
1471
            feedback_fn("* resuming disk sync")
1472
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
1473
                                                              (iobj.disks,
1474
                                                               iobj), False)
1475
            for idx, success in enumerate(result.payload):
1476
              if not success:
1477
                logging.warn("resume-sync of instance %s for disk %d failed",
1478
                             self.op.instance_name, idx)
1479

    
1480
          os_add_result.Raise("Could not add os for instance %s"
1481
                              " on node %s" % (self.op.instance_name,
1482
                                               self.pnode.name))
1483

    
1484
      else:
1485
        if self.op.mode == constants.INSTANCE_IMPORT:
1486
          feedback_fn("* running the instance OS import scripts...")
1487

    
1488
          transfers = []
1489

    
1490
          for idx, image in enumerate(self.src_images):
1491
            if not image:
1492
              continue
1493

    
1494
            # FIXME: pass debug option from opcode to backend
1495
            dt = masterd.instance.DiskTransfer("disk/%s" % idx,
1496
                                               constants.IEIO_FILE, (image, ),
1497
                                               constants.IEIO_SCRIPT,
1498
                                               ((iobj.disks[idx], iobj), idx),
1499
                                               None)
1500
            transfers.append(dt)
1501

    
1502
          import_result = \
1503
            masterd.instance.TransferInstanceData(self, feedback_fn,
1504
                                                  self.op.src_node_uuid,
1505
                                                  self.pnode.uuid,
1506
                                                  self.pnode.secondary_ip,
1507
                                                  self.op.compress,
1508
                                                  iobj, transfers)
1509
          if not compat.all(import_result):
1510
            self.LogWarning("Some disks for instance %s on node %s were not"
1511
                            " imported successfully" % (self.op.instance_name,
1512
                                                        self.pnode.name))
1513

    
1514
          rename_from = self._old_instance_name
1515

    
1516
        elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
1517
          feedback_fn("* preparing remote import...")
1518
          # The source cluster will stop the instance before attempting to make
1519
          # a connection. In some cases stopping an instance can take a long
1520
          # time, hence the shutdown timeout is added to the connection
1521
          # timeout.
1522
          connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
1523
                             self.op.source_shutdown_timeout)
1524
          timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
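          # Illustrative numbers: if RIE_CONNECT_TIMEOUT were 60s and the
          # shutdown timeout 120s, the remote end would get up to 180s to
          # connect.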
1525

    
1526
          assert iobj.primary_node == self.pnode.uuid
1527
          disk_results = \
1528
            masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
1529
                                          self.source_x509_ca,
1530
                                          self._cds, self.op.compress, timeouts)
1531
          if not compat.all(disk_results):
1532
            # TODO: Should the instance still be started, even if some disks
1533
            # failed to import (valid for local imports, too)?
1534
            self.LogWarning("Some disks for instance %s on node %s were not"
1535
                            " imported successfully" % (self.op.instance_name,
1536
                                                        self.pnode.name))
1537

    
1538
          rename_from = self.source_instance_name
1539

    
1540
        else:
1541
          # also checked in the prereq part
1542
          raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
1543
                                       % self.op.mode)
1544

    
1545
        # Run rename script on newly imported instance
1546
        assert iobj.name == self.op.instance_name
1547
        feedback_fn("Running rename script for %s" % self.op.instance_name)
1548
        result = self.rpc.call_instance_run_rename(self.pnode.uuid, iobj,
1549
                                                   rename_from,
1550
                                                   self.op.debug_level)
1551
        result.Warn("Failed to run rename script for %s on node %s" %
1552
                    (self.op.instance_name, self.pnode.name), self.LogWarning)
1553

    
1554
    assert not self.owned_locks(locking.LEVEL_NODE_RES)
1555

    
1556
    if self.op.start:
1557
      iobj.admin_state = constants.ADMINST_UP
1558
      self.cfg.Update(iobj, feedback_fn)
1559
      logging.info("Starting instance %s on node %s", self.op.instance_name,
1560
                   self.pnode.name)
1561
      feedback_fn("* starting instance...")
1562
      result = self.rpc.call_instance_start(self.pnode.uuid, (iobj, None, None),
1563
                                            False, self.op.reason)
1564
      result.Raise("Could not start instance")
1565

    
1566
    return self.cfg.GetNodeNames(list(iobj.all_nodes))
1567

    
1568

    
1569
class LUInstanceRename(LogicalUnit):
1570
  """Rename an instance.
1571

1572
  """
1573
  HPATH = "instance-rename"
1574
  HTYPE = constants.HTYPE_INSTANCE
1575

    
1576
  def CheckArguments(self):
1577
    """Check arguments.
1578

1579
    """
1580
    if self.op.ip_check and not self.op.name_check:
1581
      # TODO: make the ip check more flexible and not depend on the name check
1582
      raise errors.OpPrereqError("IP address check requires a name check",
1583
                                 errors.ECODE_INVAL)
1584

    
1585
  def BuildHooksEnv(self):
1586
    """Build hooks env.
1587

1588
    This runs on master, primary and secondary nodes of the instance.
1589

1590
    """
1591
    env = BuildInstanceHookEnvByObject(self, self.instance)
1592
    env["INSTANCE_NEW_NAME"] = self.op.new_name
1593
    return env
1594

    
1595
  def BuildHooksNodes(self):
1596
    """Build hooks nodes.
1597

1598
    """
1599
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
1600
    return (nl, nl)
1601

    
1602
  def CheckPrereq(self):
1603
    """Check prerequisites.
1604

1605
    This checks that the instance is in the cluster and is not running.
1606

1607
    """
1608
    (self.op.instance_uuid, self.op.instance_name) = \
1609
      ExpandInstanceUuidAndName(self.cfg, self.op.instance_uuid,
1610
                                self.op.instance_name)
1611
    instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1612
    assert instance is not None
1613

    
1614
    # It should actually not happen that an instance is running with a disabled
1615
    # disk template, but in case it does, the renaming of file-based instances
1616
    # will fail horribly. Thus, we check this beforehand.
1617
    if (instance.disk_template in constants.DTS_FILEBASED and
1618
        self.op.new_name != instance.name):
1619
      CheckDiskTemplateEnabled(self.cfg.GetClusterInfo(),
1620
                               instance.disk_template)
1621

    
1622
    CheckNodeOnline(self, instance.primary_node)
1623
    CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
1624
                       msg="cannot rename")
1625
    self.instance = instance
1626

    
1627
    new_name = self.op.new_name
1628
    if self.op.name_check:
1629
      hostname = _CheckHostnameSane(self, new_name)
1630
      new_name = self.op.new_name = hostname.name
1631
      if (self.op.ip_check and
1632
          netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
1633
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
1634
                                   (hostname.ip, new_name),
1635
                                   errors.ECODE_NOTUNIQUE)
1636

    
1637
    instance_names = [inst.name for
1638
                      inst in self.cfg.GetAllInstancesInfo().values()]
1639
    if new_name in instance_names and new_name != instance.name:
1640
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
1641
                                 new_name, errors.ECODE_EXISTS)
1642

    
1643
  def Exec(self, feedback_fn):
1644
    """Rename the instance.
1645

1646
    """
1647
    old_name = self.instance.name
1648

    
1649
    rename_file_storage = False
1650
    if (self.instance.disk_template in (constants.DT_FILE,
1651
                                        constants.DT_SHARED_FILE) and
1652
        self.op.new_name != self.instance.name):
1653
      old_file_storage_dir = os.path.dirname(
1654
                               self.instance.disks[0].logical_id[1])
1655
      rename_file_storage = True
1656

    
1657
    self.cfg.RenameInstance(self.instance.uuid, self.op.new_name)
1658
    # Change the instance lock. This is definitely safe while we hold the BGL.
1659
    # Otherwise the new lock would have to be added in acquired mode.
1660
    assert self.REQ_BGL
1661
    assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER)
1662
    self.glm.remove(locking.LEVEL_INSTANCE, old_name)
1663
    self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
1664

    
1665
    # re-read the instance from the configuration after rename
1666
    renamed_inst = self.cfg.GetInstanceInfo(self.instance.uuid)
1667

    
1668
    if rename_file_storage:
1669
      new_file_storage_dir = os.path.dirname(
1670
                               renamed_inst.disks[0].logical_id[1])
1671
      result = self.rpc.call_file_storage_dir_rename(renamed_inst.primary_node,
1672
                                                     old_file_storage_dir,
1673
                                                     new_file_storage_dir)
1674
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
1675
                   " (but the instance has been renamed in Ganeti)" %
1676
                   (self.cfg.GetNodeName(renamed_inst.primary_node),
1677
                    old_file_storage_dir, new_file_storage_dir))
1678

    
1679
    StartInstanceDisks(self, renamed_inst, None)
1680
    # update info on disks
1681
    info = GetInstanceInfoText(renamed_inst)
1682
    for (idx, disk) in enumerate(renamed_inst.disks):
1683
      for node_uuid in renamed_inst.all_nodes:
1684
        result = self.rpc.call_blockdev_setinfo(node_uuid,
1685
                                                (disk, renamed_inst), info)
1686
        result.Warn("Error setting info on node %s for disk %s" %
1687
                    (self.cfg.GetNodeName(node_uuid), idx), self.LogWarning)
1688
    try:
1689
      result = self.rpc.call_instance_run_rename(renamed_inst.primary_node,
1690
                                                 renamed_inst, old_name,
1691
                                                 self.op.debug_level)
1692
      result.Warn("Could not run OS rename script for instance %s on node %s"
1693
                  " (but the instance has been renamed in Ganeti)" %
1694
                  (renamed_inst.name,
1695
                   self.cfg.GetNodeName(renamed_inst.primary_node)),
1696
                  self.LogWarning)
1697
    finally:
1698
      ShutdownInstanceDisks(self, renamed_inst)
1699

    
1700
    return renamed_inst.name
1701

    
1702

    
1703
class LUInstanceRemove(LogicalUnit):
1704
  """Remove an instance.
1705

1706
  """
1707
  HPATH = "instance-remove"
1708
  HTYPE = constants.HTYPE_INSTANCE
1709
  REQ_BGL = False
1710

    
1711
  def ExpandNames(self):
1712
    self._ExpandAndLockInstance()
1713
    self.needed_locks[locking.LEVEL_NODE] = []
1714
    self.needed_locks[locking.LEVEL_NODE_RES] = []
1715
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
1716

    
1717
  def DeclareLocks(self, level):
1718
    if level == locking.LEVEL_NODE:
1719
      self._LockInstancesNodes()
1720
    elif level == locking.LEVEL_NODE_RES:
1721
      # Copy node locks
1722
      self.needed_locks[locking.LEVEL_NODE_RES] = \
1723
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1724

    
1725
  def BuildHooksEnv(self):
1726
    """Build hooks env.
1727

1728
    This runs on master, primary and secondary nodes of the instance.
1729

1730
    """
1731
    env = BuildInstanceHookEnvByObject(self, self.instance)
1732
    env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
1733
    return env
1734

    
1735
  def BuildHooksNodes(self):
1736
    """Build hooks nodes.
1737

1738
    """
1739
    nl = [self.cfg.GetMasterNode()]
1740
    nl_post = list(self.instance.all_nodes) + nl
1741
    return (nl, nl_post)
1742

    
1743
  def CheckPrereq(self):
1744
    """Check prerequisites.
1745

1746
    This checks that the instance is in the cluster.
1747

1748
    """
1749
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1750
    assert self.instance is not None, \
1751
      "Cannot retrieve locked instance %s" % self.op.instance_name
1752

    
1753
  def Exec(self, feedback_fn):
1754
    """Remove the instance.
1755

1756
    """
1757
    logging.info("Shutting down instance %s on node %s", self.instance.name,
1758
                 self.cfg.GetNodeName(self.instance.primary_node))
1759

    
1760
    result = self.rpc.call_instance_shutdown(self.instance.primary_node,
1761
                                             self.instance,
1762
                                             self.op.shutdown_timeout,
1763
                                             self.op.reason)
1764
    if self.op.ignore_failures:
1765
      result.Warn("Warning: can't shutdown instance", feedback_fn)
1766
    else:
1767
      result.Raise("Could not shutdown instance %s on node %s" %
1768
                   (self.instance.name,
1769
                    self.cfg.GetNodeName(self.instance.primary_node)))
1770

    
1771
    assert (self.owned_locks(locking.LEVEL_NODE) ==
1772
            self.owned_locks(locking.LEVEL_NODE_RES))
1773
    assert not (set(self.instance.all_nodes) -
1774
                self.owned_locks(locking.LEVEL_NODE)), \
1775
      "Not owning correct locks"
1776

    
1777
    RemoveInstance(self, feedback_fn, self.instance, self.op.ignore_failures)
1778

    
1779

    
1780
class LUInstanceMove(LogicalUnit):
1781
  """Move an instance by data-copying.
1782

1783
  """
1784
  HPATH = "instance-move"
1785
  HTYPE = constants.HTYPE_INSTANCE
1786
  REQ_BGL = False
1787

    
1788
  def ExpandNames(self):
1789
    self._ExpandAndLockInstance()
1790
    (self.op.target_node_uuid, self.op.target_node) = \
1791
      ExpandNodeUuidAndName(self.cfg, self.op.target_node_uuid,
1792
                            self.op.target_node)
1793
    self.needed_locks[locking.LEVEL_NODE] = [self.op.target_node_uuid]
1794
    self.needed_locks[locking.LEVEL_NODE_RES] = []
1795
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
1796

    
1797
  def DeclareLocks(self, level):
1798
    if level == locking.LEVEL_NODE:
1799
      self._LockInstancesNodes(primary_only=True)
1800
    elif level == locking.LEVEL_NODE_RES:
1801
      # Copy node locks
1802
      self.needed_locks[locking.LEVEL_NODE_RES] = \
1803
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1804

    
1805
  def BuildHooksEnv(self):
1806
    """Build hooks env.
1807

1808
    This runs on master, primary and target nodes of the instance.
1809

1810
    """
1811
    env = {
1812
      "TARGET_NODE": self.op.target_node,
1813
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
1814
      }
1815
    env.update(BuildInstanceHookEnvByObject(self, self.instance))
1816
    return env
1817

    
1818
  def BuildHooksNodes(self):
1819
    """Build hooks nodes.
1820

1821
    """
1822
    nl = [
1823
      self.cfg.GetMasterNode(),
1824
      self.instance.primary_node,
1825
      self.op.target_node_uuid,
1826
      ]
1827
    return (nl, nl)
1828

    
1829
  def CheckPrereq(self):
1830
    """Check prerequisites.
1831

1832
    This checks that the instance is in the cluster.
1833

1834
    """
1835
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1836
    assert self.instance is not None, \
1837
      "Cannot retrieve locked instance %s" % self.op.instance_name
1838

    
1839
    if self.instance.disk_template not in constants.DTS_COPYABLE:
1840
      raise errors.OpPrereqError("Disk template %s not suitable for copying" %
1841
                                 self.instance.disk_template,
1842
                                 errors.ECODE_STATE)
1843

    
1844
    target_node = self.cfg.GetNodeInfo(self.op.target_node_uuid)
1845
    assert target_node is not None, \
1846
      "Cannot retrieve locked node %s" % self.op.target_node
1847

    
1848
    self.target_node_uuid = target_node.uuid
1849
    if target_node.uuid == self.instance.primary_node:
1850
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
1851
                                 (self.instance.name, target_node.name),
1852
                                 errors.ECODE_STATE)
1853

    
1854
    cluster = self.cfg.GetClusterInfo()
1855
    bep = cluster.FillBE(self.instance)
1856

    
1857
    for idx, dsk in enumerate(self.instance.disks):
1858
      if dsk.dev_type not in (constants.DT_PLAIN, constants.DT_FILE,
1859
                              constants.DT_SHARED_FILE, constants.DT_GLUSTER):
1860
        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
1861
                                   " cannot copy" % idx, errors.ECODE_STATE)
1862

    
1863
    CheckNodeOnline(self, target_node.uuid)
1864
    CheckNodeNotDrained(self, target_node.uuid)
1865
    CheckNodeVmCapable(self, target_node.uuid)
1866
    group_info = self.cfg.GetNodeGroup(target_node.group)
1867
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
1868
    CheckTargetNodeIPolicy(self, ipolicy, self.instance, target_node, self.cfg,
1869
                           ignore=self.op.ignore_ipolicy)
1870

    
1871
    if self.instance.admin_state == constants.ADMINST_UP:
1872
      # check memory requirements on the target node
1873
      CheckNodeFreeMemory(
1874
          self, target_node.uuid, "failing over instance %s" %
1875
          self.instance.name, bep[constants.BE_MAXMEM],
1876
          self.instance.hypervisor,
1877
          cluster.hvparams[self.instance.hypervisor])
1878
    else:
1879
      self.LogInfo("Not checking memory on the secondary node as"
1880
                   " instance will not be started")
1881

    
1882
    # check bridge existence
1883
    CheckInstanceBridgesExist(self, self.instance, node_uuid=target_node.uuid)
1884

    
1885
  def Exec(self, feedback_fn):
1886
    """Move an instance.
1887

1888
    The move is done by shutting it down on its present node, copying
1889
    the data over (slow) and starting it on the new node.
1890

1891
    """
1892
    source_node = self.cfg.GetNodeInfo(self.instance.primary_node)
1893
    target_node = self.cfg.GetNodeInfo(self.target_node_uuid)
1894

    
1895
    self.LogInfo("Shutting down instance %s on source node %s",
1896
                 self.instance.name, source_node.name)
1897

    
1898
    assert (self.owned_locks(locking.LEVEL_NODE) ==
1899
            self.owned_locks(locking.LEVEL_NODE_RES))
1900

    
1901
    result = self.rpc.call_instance_shutdown(source_node.uuid, self.instance,
1902
                                             self.op.shutdown_timeout,
1903
                                             self.op.reason)
1904
    if self.op.ignore_consistency:
1905
      result.Warn("Could not shutdown instance %s on node %s. Proceeding"
1906
                  " anyway. Please make sure node %s is down. Error details" %
1907
                  (self.instance.name, source_node.name, source_node.name),
1908
                  self.LogWarning)
1909
    else:
1910
      result.Raise("Could not shutdown instance %s on node %s" %
1911
                   (self.instance.name, source_node.name))
1912

    
1913
    # create the target disks
1914
    try:
1915
      CreateDisks(self, self.instance, target_node_uuid=target_node.uuid)
1916
    except errors.OpExecError:
1917
      self.LogWarning("Device creation failed")
1918
      self.cfg.ReleaseDRBDMinors(self.instance.uuid)
1919
      raise
1920

    
1921
    errs = []
1922
    transfers = []
1923
    # activate, get path, create transfer jobs
1924
    for idx, disk in enumerate(self.instance.disks):
1925
      # FIXME: pass debug option from opcode to backend
1926
      dt = masterd.instance.DiskTransfer("disk/%s" % idx,
1927
                                         constants.IEIO_RAW_DISK,
1928
                                         (disk, self.instance),
1929
                                         constants.IEIO_RAW_DISK,
1930
                                         (disk, self.instance),
1931
                                         None)
1932
      transfers.append(dt)
1933

    
1934
    import_result = \
1935
      masterd.instance.TransferInstanceData(self, feedback_fn,
1936
                                            source_node.uuid,
1937
                                            target_node.uuid,
1938
                                            target_node.secondary_ip,
1939
                                            self.op.compress,
1940
                                            self.instance, transfers)
1941
    if not compat.all(import_result):
1942
      errs.append("Failed to transfer instance data")
1943

    
1944
    if errs:
1945
      self.LogWarning("Some disks failed to copy, aborting")
1946
      try:
1947
        RemoveDisks(self, self.instance, target_node_uuid=target_node.uuid)
1948
      finally:
1949
        self.cfg.ReleaseDRBDMinors(self.instance.uuid)
1950
        raise errors.OpExecError("Errors during disk copy: %s" %
1951
                                 (",".join(errs),))
1952

    
1953
    self.instance.primary_node = target_node.uuid
1954
    self.cfg.Update(self.instance, feedback_fn)
1955

    
1956
    self.LogInfo("Removing the disks on the original node")
1957
    RemoveDisks(self, self.instance, target_node_uuid=source_node.uuid)
1958

    
1959
    # Only start the instance if it's marked as up
1960
    if self.instance.admin_state == constants.ADMINST_UP:
1961
      self.LogInfo("Starting instance %s on node %s",
1962
                   self.instance.name, target_node.name)
1963

    
1964
      disks_ok, _ = AssembleInstanceDisks(self, self.instance,
1965
                                          ignore_secondaries=True)
1966
      if not disks_ok:
1967
        ShutdownInstanceDisks(self, self.instance)
1968
        raise errors.OpExecError("Can't activate the instance's disks")
1969

    
1970
      result = self.rpc.call_instance_start(target_node.uuid,
1971
                                            (self.instance, None, None), False,
1972
                                            self.op.reason)
1973
      msg = result.fail_msg
1974
      if msg:
1975
        ShutdownInstanceDisks(self, self.instance)
1976
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
1977
                                 (self.instance.name, target_node.name, msg))
1978

    
1979

    
1980
class LUInstanceMultiAlloc(NoHooksLU):
1981
  """Allocates multiple instances at the same time.
1982

1983
  """
1984
  REQ_BGL = False
1985

    
1986
  def CheckArguments(self):
1987
    """Check arguments.
1988

1989
    """
1990
    nodes = []
1991
    for inst in self.op.instances:
1992
      if inst.iallocator is not None:
1993
        raise errors.OpPrereqError("iallocator are not allowed to be set on"
1994
                                   " instance objects", errors.ECODE_INVAL)
1995
      nodes.append(bool(inst.pnode))
1996
      if inst.disk_template in constants.DTS_INT_MIRROR:
1997
        nodes.append(bool(inst.snode))
1998

    
1999
    has_nodes = compat.any(nodes)
2000
    if compat.all(nodes) ^ has_nodes:
2001
      raise errors.OpPrereqError("There are instance objects providing"
2002
                                 " pnode/snode while others do not",
2003
                                 errors.ECODE_INVAL)
2004

    
2005
    if not has_nodes and self.op.iallocator is None:
2006
      default_iallocator = self.cfg.GetDefaultIAllocator()
2007
      if default_iallocator:
2008
        self.op.iallocator = default_iallocator
2009
      else:
2010
        raise errors.OpPrereqError("No iallocator or nodes on the instances"
2011
                                   " given and no cluster-wide default"
2012
                                   " iallocator found; please specify either"
2013
                                   " an iallocator or nodes on the instances"
2014
                                   " or set a cluster-wide default iallocator",
2015
                                   errors.ECODE_INVAL)
2016

    
2017
    _CheckOpportunisticLocking(self.op)
2018

    
2019
    dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
2020
    if dups:
2021
      raise errors.OpPrereqError("There are duplicate instance names: %s" %
2022
                                 utils.CommaJoin(dups), errors.ECODE_INVAL)
2023

    
2024
  def ExpandNames(self):
2025
    """Calculate the locks.
2026

2027
    """
2028
    self.share_locks = ShareAll()
2029
    self.needed_locks = {
2030
      # iallocator will select nodes and even if no iallocator is used,
2031
      # collisions with LUInstanceCreate should be avoided
2032
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
2033
      }
2034

    
2035
    if self.op.iallocator:
2036
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
2037
      self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET
2038

    
2039
      if self.op.opportunistic_locking:
2040
        self.opportunistic_locks[locking.LEVEL_NODE] = True
2041
    else:
2042
      nodeslist = []
2043
      for inst in self.op.instances:
2044
        (inst.pnode_uuid, inst.pnode) = \
2045
          ExpandNodeUuidAndName(self.cfg, inst.pnode_uuid, inst.pnode)
2046
        nodeslist.append(inst.pnode_uuid)
2047
        if inst.snode is not None:
2048
          (inst.snode_uuid, inst.snode) = \
2049
            ExpandNodeUuidAndName(self.cfg, inst.snode_uuid, inst.snode)
2050
          nodeslist.append(inst.snode_uuid)
2051

    
2052
      self.needed_locks[locking.LEVEL_NODE] = nodeslist
2053
      # Lock resources of instance's primary and secondary nodes (copy to
2054
      # prevent accidental modification)
2055
      self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)
2056

    
2057
  def DeclareLocks(self, level):
2058
    if level == locking.LEVEL_NODE_RES and \
2059
      self.opportunistic_locks[locking.LEVEL_NODE]:
2060
      # Even when using opportunistic locking, we require the same set of
2061
      # NODE_RES locks as we got NODE locks
2062
      self.needed_locks[locking.LEVEL_NODE_RES] = \
2063
        self.owned_locks(locking.LEVEL_NODE)
2064

    
2065
  def CheckPrereq(self):
2066
    """Check prerequisite.
2067

2068
    """
2069
    if self.op.iallocator:
2070
      cluster = self.cfg.GetClusterInfo()
2071
      default_vg = self.cfg.GetVGName()
2072
      ec_id = self.proc.GetECId()
2073

    
2074
      if self.op.opportunistic_locking:
2075
        # Only consider nodes for which a lock is held
2076
        node_whitelist = self.cfg.GetNodeNames(
2077
                           list(self.owned_locks(locking.LEVEL_NODE)))
2078
      else:
2079
        node_whitelist = None
2080

    
2081
      insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
2082
                                           _ComputeNics(op, cluster, None,
2083
                                                        self.cfg, ec_id),
2084
                                           _ComputeFullBeParams(op, cluster),
2085
                                           node_whitelist)
2086
               for op in self.op.instances]
2087

    
2088
      req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
2089
      ial = iallocator.IAllocator(self.cfg, self.rpc, req)
2090

    
2091
      ial.Run(self.op.iallocator)
2092

    
2093
      if not ial.success:
2094
        raise errors.OpPrereqError("Can't compute nodes using"
2095
                                   " iallocator '%s': %s" %
2096
                                   (self.op.iallocator, ial.info),
2097
                                   errors.ECODE_NORES)
2098

    
2099
      self.ia_result = ial.result
2100

    
2101
    if self.op.dry_run:
2102
      self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
2103
        constants.JOB_IDS_KEY: [],
2104
        })
2105

    
2106
  def _ConstructPartialResult(self):
2107
    """Contructs the partial result.
2108

2109
    """
2110
    if self.op.iallocator:
2111
      (allocatable, failed_insts) = self.ia_result
2112
      allocatable_insts = map(compat.fst, allocatable)
2113
    else:
2114
      allocatable_insts = [op.instance_name for op in self.op.instances]
2115
      failed_insts = []
2116

    
2117
    return {
2118
      constants.ALLOCATABLE_KEY: allocatable_insts,
2119
      constants.FAILED_KEY: failed_insts,
2120
      }
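    # Sketch of the returned mapping (key names depend on the constants above),
    # e.g. {"allocatable": ["inst1.example.com"], "failed": []}.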
2121

    
2122
  def Exec(self, feedback_fn):
2123
    """Executes the opcode.
2124

2125
    """
2126
    jobs = []
2127
    if self.op.iallocator:
2128
      op2inst = dict((op.instance_name, op) for op in self.op.instances)
2129
      (allocatable, failed) = self.ia_result
2130

    
2131
      for (name, node_names) in allocatable:
2132
        op = op2inst.pop(name)
2133

    
2134
        (op.pnode_uuid, op.pnode) = \
2135
          ExpandNodeUuidAndName(self.cfg, None, node_names[0])
2136
        if len(node_names) > 1:
2137
          (op.snode_uuid, op.snode) = \
2138
            ExpandNodeUuidAndName(self.cfg, None, node_names[1])
2139

    
2140
        jobs.append([op])
2141

    
2142
      missing = set(op2inst.keys()) - set(failed)
2143
      assert not missing, \
2144
        "Iallocator returned an incomplete result: %s" % \
2145
        utils.CommaJoin(missing)
2146
    else:
2147
      jobs.extend([op] for op in self.op.instances)
2148

    
2149
    return ResultWithJobs(jobs, **self._ConstructPartialResult())
2150

    
2151

    
2152
class _InstNicModPrivate:
2153
  """Data structure for network interface modifications.
2154

2155
  Used by L{LUInstanceSetParams}.
2156

2157
  """
2158
  def __init__(self):
2159
    self.params = None
2160
    self.filled = None
2161

    
2162

    
2163
def _PrepareContainerMods(mods, private_fn):
2164
  """Prepares a list of container modifications by adding a private data field.
2165

2166
  @type mods: list of tuples; (operation, index, parameters)
2167
  @param mods: List of modifications
2168
  @type private_fn: callable or None
2169
  @param private_fn: Callable for constructing a private data field for a
2170
    modification
2171
  @rtype: list
2172

2173
  """
2174
  if private_fn is None:
2175
    fn = lambda: None
2176
  else:
2177
    fn = private_fn
2178

    
2179
  return [(op, idx, params, fn()) for (op, idx, params) in mods]
2180

    
2181

    
2182
def _CheckNodesPhysicalCPUs(lu, node_uuids, requested, hypervisor_specs):
2183
  """Checks if nodes have enough physical CPUs
2184

2185
  This function checks if all given nodes have the needed number of
2186
  physical CPUs. In case any node has less CPUs or we cannot get the
2187
  information from the node, this function raises an OpPrereqError
2188
  exception.
2189

2190
  @type lu: C{LogicalUnit}
2191
  @param lu: a logical unit from which we get configuration data
2192
  @type node_uuids: C{list}
2193
  @param node_uuids: the list of node UUIDs to check
2194
  @type requested: C{int}
2195
  @param requested: the minimum acceptable number of physical CPUs
2196
  @type hypervisor_specs: list of pairs (string, dict of strings)
2197
  @param hypervisor_specs: list of hypervisor specifications in
2198
      pairs (hypervisor_name, hvparams)
2199
  @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
2200
      or we cannot check the node
2201

2202
  """
2203
  nodeinfo = lu.rpc.call_node_info(node_uuids, None, hypervisor_specs)
2204
  for node_uuid in node_uuids:
2205
    info = nodeinfo[node_uuid]
2206
    node_name = lu.cfg.GetNodeName(node_uuid)
2207
    info.Raise("Cannot get current information from node %s" % node_name,
2208
               prereq=True, ecode=errors.ECODE_ENVIRON)
2209
    (_, _, (hv_info, )) = info.payload
2210
    num_cpus = hv_info.get("cpu_total", None)
2211
    if not isinstance(num_cpus, int):
2212
      raise errors.OpPrereqError("Can't compute the number of physical CPUs"
2213
                                 " on node %s, result was '%s'" %
2214
                                 (node_name, num_cpus), errors.ECODE_ENVIRON)
2215
    if requested > num_cpus:
2216
      raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
2217
                                 "required" % (node_name, num_cpus, requested),
2218
                                 errors.ECODE_NORES)
2219

    
2220

    
2221
def GetItemFromContainer(identifier, kind, container):
2222
  """Return the item refered by the identifier.
2223

2224
  @type identifier: string
2225
  @param identifier: Item index or name or UUID
2226
  @type kind: string
2227
  @param kind: One-word item description
2228
  @type container: list
2229
  @param container: Container to get the item from
2230

2231
  """
2232
  # Index
2233
  try:
2234
    idx = int(identifier)
2235
    if idx == -1:
2236
      # Append
2237
      absidx = len(container) - 1
2238
    elif idx < 0:
2239
      raise IndexError("Not accepting negative indices other than -1")
2240
    elif idx > len(container):
2241
      raise IndexError("Got %s index %s, but there are only %s" %
2242
                       (kind, idx, len(container)))
2243
    else:
2244
      absidx = idx
2245
    return (absidx, container[idx])
2246
  except ValueError:
2247
    pass
2248

    
2249
  for idx, item in enumerate(container):
2250
    if item.uuid == identifier or item.name == identifier:
2251
      return (idx, item)
2252

    
2253
  raise errors.OpPrereqError("Cannot find %s with identifier %s" %
2254
                             (kind, identifier), errors.ECODE_NOENT)
2255

    
2256

    
2257
def _ApplyContainerMods(kind, container, chgdesc, mods,
2258
                        create_fn, modify_fn, remove_fn,
2259
                        post_add_fn=None):
2260
  """Applies descriptions in C{mods} to C{container}.
2261

2262
  @type kind: string
2263
  @param kind: One-word item description
2264
  @type container: list
2265
  @param container: Container to modify
2266
  @type chgdesc: None or list
2267
  @param chgdesc: List of applied changes
2268
  @type mods: list
2269
  @param mods: Modifications as returned by L{_PrepareContainerMods}
2270
  @type create_fn: callable
2271
  @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
2272
    receives absolute item index, parameters and private data object as added
2273
    by L{_PrepareContainerMods}, returns tuple containing new item and changes
2274
    as list
2275
  @type modify_fn: callable
2276
  @param modify_fn: Callback for modifying an existing item
2277
    (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
2278
    and private data object as added by L{_PrepareContainerMods}, returns
2279
    changes as list
2280
  @type remove_fn: callable
2281
  @param remove_fn: Callback on removing item; receives absolute item index,
2282
    item and private data object as added by L{_PrepareContainerMods}
2283
  @type post_add_fn: callable
2284
  @param post_add_fn: Callable for post-processing a newly created item after
2285
    it has been put into the container. It receives the index of the new item
2286
    and the new item as parameters.
2287

2288
  """
2289
  for (op, identifier, params, private) in mods:
2290
    changes = None
2291

    
2292
    if op == constants.DDM_ADD:
2293
      # Calculate where item will be added
2294
      # When adding an item, identifier can only be an index
2295
      try:
2296
        idx = int(identifier)
2297
      except ValueError:
2298
        raise errors.OpPrereqError("Only possitive integer or -1 is accepted as"
2299
                                   " identifier for %s" % constants.DDM_ADD,
2300
                                   errors.ECODE_INVAL)
2301
      if idx == -1:
2302
        addidx = len(container)
2303
      else:
2304
        if idx < 0:
2305
          raise IndexError("Not accepting negative indices other than -1")
2306
        elif idx > len(container):
2307
          raise IndexError("Got %s index %s, but there are only %s" %
2308
                           (kind, idx, len(container)))
2309
        addidx = idx
2310

    
2311
      if create_fn is None:
2312
        item = params
2313
      else:
2314
        (item, changes) = create_fn(addidx, params, private)
2315

    
2316
      if idx == -1:
2317
        container.append(item)
2318
      else:
2319
        assert idx >= 0
2320
        assert idx <= len(container)
2321
        # list.insert does so before the specified index
2322
        container.insert(idx, item)
2323

    
2324
      if post_add_fn is not None:
2325
        post_add_fn(addidx, item)
2326

    
2327
    else:
2328
      # Retrieve existing item
2329
      (absidx, item) = GetItemFromContainer(identifier, kind, container)
2330

    
2331
      if op == constants.DDM_REMOVE:
2332
        assert not params
2333

    
2334
        changes = [("%s/%s" % (kind, absidx), "remove")]
2335

    
2336
        if remove_fn is not None:
2337
          msg = remove_fn(absidx, item, private)
2338
          if msg:
2339
            changes.append(("%s/%s" % (kind, absidx), msg))
2340

    
2341
        assert container[absidx] == item
2342
        del container[absidx]
2343
      elif op == constants.DDM_MODIFY:
2344
        if modify_fn is not None:
2345
          changes = modify_fn(absidx, item, params, private)
2346
      else:
2347
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2348

    
2349
    assert _TApplyContModsCbChanges(changes)
2350

    
2351
    if not (chgdesc is None or changes is None):
2352
      chgdesc.extend(changes)
2353

    
2354

    
2355
def _UpdateIvNames(base_index, disks):
2356
  """Updates the C{iv_name} attribute of disks.
2357

2358
  @type disks: list of L{objects.Disk}
2359

2360
  """
2361
  for (idx, disk) in enumerate(disks):
2362
    disk.iv_name = "disk/%s" % (base_index + idx, )
2363

    
2364

    
2365
class LUInstanceSetParams(LogicalUnit):
2366
  """Modifies an instances's parameters.
2367

2368
  """
2369
  HPATH = "instance-modify"
2370
  HTYPE = constants.HTYPE_INSTANCE
2371
  REQ_BGL = False
2372

    
2373
  @staticmethod
2374
  def _UpgradeDiskNicMods(kind, mods, verify_fn):
2375
    assert ht.TList(mods)
2376
    assert not mods or len(mods[0]) in (2, 3)
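    # Conversion sketch (values made up): legacy 2-tuple mods such as
    #   [(constants.DDM_ADD, {...}), ("0", {...})]
    # become [(constants.DDM_ADD, -1, {...}), (constants.DDM_MODIFY, "0", {...})].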
2377

    
2378
    if mods and len(mods[0]) == 2:
2379
      result = []
2380

    
2381
      addremove = 0
2382
      for op, params in mods:
2383
        if op in (constants.DDM_ADD, constants.DDM_REMOVE):
2384
          result.append((op, -1, params))
2385
          addremove += 1
2386

    
2387
          if addremove > 1:
2388
            raise errors.OpPrereqError("Only one %s add or remove operation is"
2389
                                       " supported at a time" % kind,
2390
                                       errors.ECODE_INVAL)
2391
        else:
2392
          result.append((constants.DDM_MODIFY, op, params))
2393

    
2394
      assert verify_fn(result)
2395
    else:
2396
      result = mods
2397

    
2398
    return result
2399

    
2400
  @staticmethod
2401
  def _CheckMods(kind, mods, key_types, item_fn):
2402
    """Ensures requested disk/NIC modifications are valid.
2403

2404
    """
2405
    for (op, _, params) in mods:
2406
      assert ht.TDict(params)
2407

    
2408
      # If 'key_types' is an empty dict, we assume we have an
2409
      # 'ext' template and thus do not ForceDictType
2410
      if key_types:
2411
        utils.ForceDictType(params, key_types)
2412

    
2413
      if op == constants.DDM_REMOVE:
2414
        if params:
2415
          raise errors.OpPrereqError("No settings should be passed when"
2416
                                     " removing a %s" % kind,
2417
                                     errors.ECODE_INVAL)
2418
      elif op in (constants.DDM_ADD, constants.DDM_MODIFY):
2419
        item_fn(op, params)
2420
      else:
2421
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2422

    
2423
  def _VerifyDiskModification(self, op, params, excl_stor):
2424
    """Verifies a disk modification.
2425

2426
    """
2427
    if op == constants.DDM_ADD:
2428
      mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
2429
      if mode not in constants.DISK_ACCESS_SET:
2430
        raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
2431
                                   errors.ECODE_INVAL)
2432

    
2433
      size = params.get(constants.IDISK_SIZE, None)
2434
      if size is None:
2435
        raise errors.OpPrereqError("Required disk parameter '%s' missing" %
2436
                                   constants.IDISK_SIZE, errors.ECODE_INVAL)
2437
      size = int(size)
2438

    
2439
      params[constants.IDISK_SIZE] = size
2440
      name = params.get(constants.IDISK_NAME, None)
2441
      if name is not None and name.lower() == constants.VALUE_NONE:
2442
        params[constants.IDISK_NAME] = None
2443

    
2444
      CheckSpindlesExclusiveStorage(params, excl_stor, True)
2445

    
2446
    elif op == constants.DDM_MODIFY:
2447
      if constants.IDISK_SIZE in params:
2448
        raise errors.OpPrereqError("Disk size change not possible, use"
2449
                                   " grow-disk", errors.ECODE_INVAL)
2450

    
2451
      # Disk modification supports changing only the disk name and mode.
2452
      # Changing arbitrary parameters is allowed only for ext disk templates.
2453
      if self.instance.disk_template != constants.DT_EXT:
2454
        utils.ForceDictType(params, constants.MODIFIABLE_IDISK_PARAMS_TYPES)
2455

    
2456
      name = params.get(constants.IDISK_NAME, None)
2457
      if name is not None and name.lower() == constants.VALUE_NONE:
2458
        params[constants.IDISK_NAME] = None
2459

    
2460
  @staticmethod
2461
  def _VerifyNicModification(op, params):
2462
    """Verifies a network interface modification.
2463

2464
    """
2465
    if op in (constants.DDM_ADD, constants.DDM_MODIFY):
2466
      ip = params.get(constants.INIC_IP, None)
2467
      name = params.get(constants.INIC_NAME, None)
2468
      req_net = params.get(constants.INIC_NETWORK, None)
2469
      link = params.get(constants.NIC_LINK, None)
2470
      mode = params.get(constants.NIC_MODE, None)
2471
      if name is not None and name.lower() == constants.VALUE_NONE:
2472
        params[constants.INIC_NAME] = None
2473
      if req_net is not None:
2474
        if req_net.lower() == constants.VALUE_NONE:
2475
          params[constants.INIC_NETWORK] = None
2476
          req_net = None
2477
        elif link is not None or mode is not None:
2478
          raise errors.OpPrereqError("If network is given"
2479
                                     " mode or link should not",
2480
                                     errors.ECODE_INVAL)
2481

    
2482
      if op == constants.DDM_ADD:
2483
        macaddr = params.get(constants.INIC_MAC, None)
2484
        if macaddr is None:
2485
          params[constants.INIC_MAC] = constants.VALUE_AUTO
2486

    
2487
      if ip is not None:
2488
        if ip.lower() == constants.VALUE_NONE:
2489
          params[constants.INIC_IP] = None
2490
        else:
2491
          if ip.lower() == constants.NIC_IP_POOL:
2492
            if op == constants.DDM_ADD and req_net is None:
2493
              raise errors.OpPrereqError("If ip=pool, parameter network"
2494
                                         " cannot be none",
2495
                                         errors.ECODE_INVAL)
2496
          else:
2497
            if not netutils.IPAddress.IsValid(ip):
2498
              raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
2499
                                         errors.ECODE_INVAL)
2500

    
2501
      if constants.INIC_MAC in params:
2502
        macaddr = params[constants.INIC_MAC]
2503
        if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2504
          macaddr = utils.NormalizeAndValidateMac(macaddr)
2505

    
2506
        if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
2507
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
2508
                                     " modifying an existing NIC",
2509
                                     errors.ECODE_INVAL)
2510

    
2511
  def CheckArguments(self):
2512
    if not (self.op.nics or self.op.disks or self.op.disk_template or
2513
            self.op.hvparams or self.op.beparams or self.op.os_name or
2514
            self.op.osparams or self.op.offline is not None or
2515
            self.op.runtime_mem or self.op.pnode or self.op.osparams_private or
2516
            self.op.instance_communication is not None):
2517
      raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
2518

    
2519
    if self.op.hvparams:
2520
      CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
2521
                           "hypervisor", "instance", "cluster")
2522

    
2523
    self.op.disks = self._UpgradeDiskNicMods(
2524
      "disk", self.op.disks, ht.TSetParamsMods(ht.TIDiskParams))
2525
    self.op.nics = self._UpgradeDiskNicMods(
2526
      "NIC", self.op.nics, ht.TSetParamsMods(ht.TINicParams))
2527

    
2528
    if self.op.disks and self.op.disk_template is not None:
2529
      raise errors.OpPrereqError("Disk template conversion and other disk"
2530
                                 " changes not supported at the same time",
2531
                                 errors.ECODE_INVAL)
2532

    
2533
    if (self.op.disk_template and
2534
        self.op.disk_template in constants.DTS_INT_MIRROR and
2535
        self.op.remote_node is None):
2536
      raise errors.OpPrereqError("Changing the disk template to a mirrored"
2537
                                 " one requires specifying a secondary node",
2538
                                 errors.ECODE_INVAL)
2539

    
2540
    # Check NIC modifications
2541
    self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
2542
                    self._VerifyNicModification)
2543

    
2544
    if self.op.pnode:
2545
      (self.op.pnode_uuid, self.op.pnode) = \
2546
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
2547

    
2548
  def ExpandNames(self):
2549
    self._ExpandAndLockInstance()
2550
    self.needed_locks[locking.LEVEL_NODEGROUP] = []
2551
    # Can't even acquire node locks in shared mode as upcoming changes in
2552
    # Ganeti 2.6 will start to modify the node object on disk conversion
2553
    self.needed_locks[locking.LEVEL_NODE] = []
2554
    self.needed_locks[locking.LEVEL_NODE_RES] = []
2555
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2556
    # Lock the node group to look up the ipolicy
2557
    self.share_locks[locking.LEVEL_NODEGROUP] = 1
2558

    
2559
  def DeclareLocks(self, level):
2560
    if level == locking.LEVEL_NODEGROUP:
2561
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
2562
      # Acquire locks for the instance's nodegroups optimistically. Needs
2563
      # to be verified in CheckPrereq
2564
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
2565
        self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
2566
    elif level == locking.LEVEL_NODE:
2567
      self._LockInstancesNodes()
2568
      if self.op.disk_template and self.op.remote_node:
2569
        (self.op.remote_node_uuid, self.op.remote_node) = \
2570
          ExpandNodeUuidAndName(self.cfg, self.op.remote_node_uuid,
2571
                                self.op.remote_node)
2572
        self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node_uuid)
2573
    elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
2574
      # Copy node locks
2575
      self.needed_locks[locking.LEVEL_NODE_RES] = \
2576
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
2577
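  # Illustrative sketch: when converting to DRBD with a remote node B for an
  # instance whose primary node is A (A and B being placeholder names), the
  # declarations above end up with
  #   LEVEL_NODE:     locks for A (and any existing secondaries) plus B
  #   LEVEL_NODE_RES: a copy of that same list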

    
2578
  def BuildHooksEnv(self):
2579
    """Build hooks env.
2580

2581
    This runs on the master, primary and secondaries.
2582

2583
    """
2584
    args = {}
2585
    if constants.BE_MINMEM in self.be_new:
2586
      args["minmem"] = self.be_new[constants.BE_MINMEM]
2587
    if constants.BE_MAXMEM in self.be_new:
2588
      args["maxmem"] = self.be_new[constants.BE_MAXMEM]
2589
    if constants.BE_VCPUS in self.be_new:
2590
      args["vcpus"] = self.be_new[constants.BE_VCPUS]
2591
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
2592
    # information at all.
2593

    
2594
    if self._new_nics is not None:
2595
      nics = []
2596

    
2597
      for nic in self._new_nics:
2598
        n = copy.deepcopy(nic)
2599
        nicparams = self.cluster.SimpleFillNIC(n.nicparams)
2600
        n.nicparams = nicparams
2601
        nics.append(NICToTuple(self, n))
2602

    
2603
      args["nics"] = nics
2604

    
2605
    env = BuildInstanceHookEnvByObject(self, self.instance, override=args)
2606
    if self.op.disk_template:
2607
      env["NEW_DISK_TEMPLATE"] = self.op.disk_template
2608
    if self.op.runtime_mem:
2609
      env["RUNTIME_MEMORY"] = self.op.runtime_mem
2610

    
2611
    return env
2612
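  # Illustrative sketch: for a modification that raises maxmem and changes the
  # disk template, the override arguments built above carry the new "maxmem"
  # value and the environment additionally gets NEW_DISK_TEMPLATE="drbd" (for
  # example); the exact variable names exported to hooks are determined by
  # BuildInstanceHookEnvByObject.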

    
2613
  def BuildHooksNodes(self):
2614
    """Build hooks nodes.
2615

2616
    """
2617
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
2618
    return (nl, nl)
2619

    
2620
  def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
2621
                              old_params, cluster, pnode_uuid):
2622

    
2623
    update_params_dict = dict([(key, params[key])
2624
                               for key in constants.NICS_PARAMETERS
2625
                               if key in params])
2626

    
2627
    req_link = update_params_dict.get(constants.NIC_LINK, None)
2628
    req_mode = update_params_dict.get(constants.NIC_MODE, None)
2629

    
2630
    new_net_uuid = None
2631
    new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid)
2632
    if new_net_uuid_or_name:
2633
      new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name)
2634
      new_net_obj = self.cfg.GetNetwork(new_net_uuid)
2635

    
2636
    if old_net_uuid:
2637
      old_net_obj = self.cfg.GetNetwork(old_net_uuid)
2638

    
2639
    if new_net_uuid:
2640
      netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode_uuid)
2641
      if not netparams:
2642
        raise errors.OpPrereqError("No netparams found for the network"
2643
                                   " %s, probably not connected" %
2644
                                   new_net_obj.name, errors.ECODE_INVAL)
2645
      new_params = dict(netparams)
2646
    else:
2647
      new_params = GetUpdatedParams(old_params, update_params_dict)
2648

    
2649
    utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)
2650

    
2651
    new_filled_params = cluster.SimpleFillNIC(new_params)
2652
    objects.NIC.CheckParameterSyntax(new_filled_params)
2653

    
2654
    new_mode = new_filled_params[constants.NIC_MODE]
2655
    if new_mode == constants.NIC_MODE_BRIDGED:
2656
      bridge = new_filled_params[constants.NIC_LINK]
2657
      msg = self.rpc.call_bridges_exist(pnode_uuid, [bridge]).fail_msg
2658
      if msg:
2659
        msg = "Error checking bridges on node '%s': %s" % \
2660
                (self.cfg.GetNodeName(pnode_uuid), msg)
2661
        if self.op.force:
2662
          self.warn.append(msg)
2663
        else:
2664
          raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
2665

    
2666
    elif new_mode == constants.NIC_MODE_ROUTED:
2667
      ip = params.get(constants.INIC_IP, old_ip)
2668
      if ip is None:
2669
        raise errors.OpPrereqError("Cannot set the NIC IP address to None"
2670
                                   " on a routed NIC", errors.ECODE_INVAL)
2671

    
2672
    elif new_mode == constants.NIC_MODE_OVS:
2673
      # TODO: check OVS link
2674
      self.LogInfo("OVS links are currently not checked for correctness")
2675

    
2676
    if constants.INIC_MAC in params:
2677
      mac = params[constants.INIC_MAC]
2678
      if mac is None:
2679
        raise errors.OpPrereqError("Cannot unset the NIC MAC address",
2680
                                   errors.ECODE_INVAL)
2681
      elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2682
        # otherwise generate the MAC address
2683
        params[constants.INIC_MAC] = \
2684
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
2685
      else:
2686
        # or validate/reserve the current one
2687
        try:
2688
          self.cfg.ReserveMAC(mac, self.proc.GetECId())
2689
        except errors.ReservationError:
2690
          raise errors.OpPrereqError("MAC address '%s' already in use"
2691
                                     " in cluster" % mac,
2692
                                     errors.ECODE_NOTUNIQUE)
2693
    elif new_net_uuid != old_net_uuid:
2694

    
2695
      def get_net_prefix(net_uuid):
2696
        mac_prefix = None
2697
        if net_uuid:
2698
          nobj = self.cfg.GetNetwork(net_uuid)
2699
          mac_prefix = nobj.mac_prefix
2700

    
2701
        return mac_prefix
2702

    
2703
      new_prefix = get_net_prefix(new_net_uuid)
2704
      old_prefix = get_net_prefix(old_net_uuid)
2705
      if old_prefix != new_prefix:
2706
        params[constants.INIC_MAC] = \
2707
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
2708

    
2709
    # if there is a change in (ip, network) tuple
2710
    new_ip = params.get(constants.INIC_IP, old_ip)
2711
    if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
2712
      if new_ip:
2713
        # if IP is pool then require a network and generate one IP
2714
        if new_ip.lower() == constants.NIC_IP_POOL:
2715
          if new_net_uuid:
2716
            try:
2717
              new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
2718
            except errors.ReservationError:
2719
              raise errors.OpPrereqError("Unable to get a free IP"
2720
                                         " from the address pool",
2721
                                         errors.ECODE_STATE)
2722
            self.LogInfo("Chose IP %s from network %s",
2723
                         new_ip,
2724
                         new_net_obj.name)
2725
            params[constants.INIC_IP] = new_ip
2726
          else:
2727
            raise errors.OpPrereqError("ip=pool, but no network found",
2728
                                       errors.ECODE_INVAL)
2729
        # Reserve the new IP in the new network, if any
2730
        elif new_net_uuid:
2731
          try:
2732
            self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId(),
2733
                               check=self.op.conflicts_check)
2734
            self.LogInfo("Reserving IP %s in network %s",
2735
                         new_ip, new_net_obj.name)
2736
          except errors.ReservationError:
2737
            raise errors.OpPrereqError("IP %s not available in network %s" %
2738
                                       (new_ip, new_net_obj.name),
2739
                                       errors.ECODE_NOTUNIQUE)
2740
        # new network is None so check if new IP is a conflicting IP
2741
        elif self.op.conflicts_check:
2742
          _CheckForConflictingIp(self, new_ip, pnode_uuid)
2743

    
2744
      # release old IP if old network is not None
2745
      if old_ip and old_net_uuid:
2746
        try:
2747
          self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId())
2748
        except errors.AddressPoolError:
2749
          logging.warning("Release IP %s not contained in network %s",
2750
                          old_ip, old_net_obj.name)
2751

    
2752
    # there are no changes in (ip, network) tuple and old network is not None
2753
    elif (old_net_uuid is not None and
2754
          (req_link is not None or req_mode is not None)):
2755
      raise errors.OpPrereqError("Not allowed to change link or mode of"
2756
                                 " a NIC that is connected to a network",
2757
                                 errors.ECODE_INVAL)
2758

    
2759
    private.params = new_params
2760
    private.filled = new_filled_params
2761

    
2762
  def _PreCheckDiskTemplate(self, pnode_info):
2763
    """CheckPrereq checks related to a new disk template."""
2764
    # Arguments are passed to avoid configuration lookups
2765
    pnode_uuid = self.instance.primary_node
2766
    if self.instance.disk_template == self.op.disk_template:
2767
      raise errors.OpPrereqError("Instance already has disk template %s" %
2768
                                 self.instance.disk_template,
2769
                                 errors.ECODE_INVAL)
2770

    
2771
    if not self.cluster.IsDiskTemplateEnabled(self.op.disk_template):
2772
      raise errors.OpPrereqError("Disk template '%s' is not enabled for this"
2773
                                 " cluster." % self.op.disk_template)
2774

    
2775
    if (self.instance.disk_template,
2776
        self.op.disk_template) not in self._DISK_CONVERSIONS:
2777
      raise errors.OpPrereqError("Unsupported disk template conversion from"
2778
                                 " %s to %s" % (self.instance.disk_template,
2779
                                                self.op.disk_template),
2780
                                 errors.ECODE_INVAL)
2781
    CheckInstanceState(self, self.instance, INSTANCE_DOWN,
2782
                       msg="cannot change disk template")
2783
    if self.op.disk_template in constants.DTS_INT_MIRROR:
2784
      if self.op.remote_node_uuid == pnode_uuid:
2785
        raise errors.OpPrereqError("Given new secondary node %s is the same"
2786
                                   " as the primary node of the instance" %
2787
                                   self.op.remote_node, errors.ECODE_STATE)
2788
      CheckNodeOnline(self, self.op.remote_node_uuid)
2789
      CheckNodeNotDrained(self, self.op.remote_node_uuid)
2790
      # FIXME: here we assume that the old instance type is DT_PLAIN
2791
      assert self.instance.disk_template == constants.DT_PLAIN
2792
      disks = [{constants.IDISK_SIZE: d.size,
2793
                constants.IDISK_VG: d.logical_id[0]}
2794
               for d in self.instance.disks]
2795
      required = ComputeDiskSizePerVG(self.op.disk_template, disks)
2796
      CheckNodesFreeDiskPerVG(self, [self.op.remote_node_uuid], required)
2797

    
2798
      snode_info = self.cfg.GetNodeInfo(self.op.remote_node_uuid)
2799
      snode_group = self.cfg.GetNodeGroup(snode_info.group)
2800
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
2801
                                                              snode_group)
2802
      CheckTargetNodeIPolicy(self, ipolicy, self.instance, snode_info, self.cfg,
2803
                             ignore=self.op.ignore_ipolicy)
2804
      if pnode_info.group != snode_info.group:
2805
        self.LogWarning("The primary and secondary nodes are in two"
2806
                        " different node groups; the disk parameters"
2807
                        " from the first disk's node group will be"
2808
                        " used")
2809

    
2810
    if self.op.disk_template not in constants.DTS_EXCL_STORAGE:
2811
      # Make sure none of the nodes require exclusive storage
2812
      nodes = [pnode_info]
2813
      if self.op.disk_template in constants.DTS_INT_MIRROR:
2814
        assert snode_info
2815
        nodes.append(snode_info)
2816
      has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
2817
      if compat.any(map(has_es, nodes)):
2818
        errmsg = ("Cannot convert disk template from %s to %s when exclusive"
2819
                  " storage is enabled" % (self.instance.disk_template,
2820
                                           self.op.disk_template))
2821
        raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)
2822

    
2823
  def _PreCheckDisks(self, ispec):
2824
    """CheckPrereq checks related to disk changes.
2825

2826
    @type ispec: dict
2827
    @param ispec: instance specs to be updated with the new disks
2828

2829
    """
2830
    self.diskparams = self.cfg.GetInstanceDiskParams(self.instance)
2831

    
2832
    excl_stor = compat.any(
2833
      rpc.GetExclusiveStorageForNodes(self.cfg,
2834
                                      self.instance.all_nodes).values()
2835
      )
2836

    
2837
    # Check disk modifications. This is done here and not in CheckArguments
2838
    # (as with NICs), because we need to know the instance's disk template
2839
    ver_fn = lambda op, par: self._VerifyDiskModification(op, par, excl_stor)
2840
    if self.instance.disk_template == constants.DT_EXT:
2841
      self._CheckMods("disk", self.op.disks, {}, ver_fn)
2842
    else:
2843
      self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
2844
                      ver_fn)
2845

    
2846
    self.diskmod = _PrepareContainerMods(self.op.disks, None)
2847

    
2848
    # Check the validity of the `provider' parameter
2849
    if self.instance.disk_template == constants.DT_EXT:
2850
      for mod in self.diskmod:
2851
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
2852
        if mod[0] == constants.DDM_ADD:
2853
          if ext_provider is None:
2854
            raise errors.OpPrereqError("Instance template is '%s' and parameter"
2855
                                       " '%s' missing, during disk add" %
2856
                                       (constants.DT_EXT,
2857
                                        constants.IDISK_PROVIDER),
2858
                                       errors.ECODE_NOENT)
2859
        elif mod[0] == constants.DDM_MODIFY:
2860
          if ext_provider:
2861
            raise errors.OpPrereqError("Parameter '%s' is invalid during disk"
2862
                                       " modification" %
2863
                                       constants.IDISK_PROVIDER,
2864
                                       errors.ECODE_INVAL)
2865
    else:
2866
      for mod in self.diskmod:
2867
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
2868
        if ext_provider is not None:
2869
          raise errors.OpPrereqError("Parameter '%s' is only valid for"
2870
                                     " instances of type '%s'" %
2871
                                     (constants.IDISK_PROVIDER,
2872
                                      constants.DT_EXT),
2873
                                     errors.ECODE_INVAL)
2874

    
2875
    if not self.op.wait_for_sync and self.instance.disks_active:
2876
      for mod in self.diskmod:
2877
        if mod[0] == constants.DDM_ADD:
2878
          raise errors.OpPrereqError("Can't add a disk to an instance with"
2879
                                     " activated disks and"
2880
                                     " --no-wait-for-sync given.",
2881
                                     errors.ECODE_INVAL)
2882

    
2883
    if self.op.disks and self.instance.disk_template == constants.DT_DISKLESS:
2884
      raise errors.OpPrereqError("Disk operations not supported for"
2885
                                 " diskless instances", errors.ECODE_INVAL)
2886

    
2887
    def _PrepareDiskMod(_, disk, params, __):
2888
      disk.name = params.get(constants.IDISK_NAME, None)
2889

    
2890
    # Verify disk changes (operating on a copy)
2891
    disks = copy.deepcopy(self.instance.disks)
2892
    _ApplyContainerMods("disk", disks, None, self.diskmod, None,
2893
                        _PrepareDiskMod, None)
2894
    utils.ValidateDeviceNames("disk", disks)
2895
    if len(disks) > constants.MAX_DISKS:
2896
      raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
2897
                                 " more" % constants.MAX_DISKS,
2898
                                 errors.ECODE_STATE)
2899
    disk_sizes = [disk.size for disk in self.instance.disks]
2900
    disk_sizes.extend(params["size"] for (op, idx, params, private) in
2901
                      self.diskmod if op == constants.DDM_ADD)
2902
    ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
2903
    ispec[constants.ISPEC_DISK_SIZE] = disk_sizes
2904

    
2905
    if self.op.offline is not None and self.op.offline:
2906
      CheckInstanceState(self, self.instance, CAN_CHANGE_INSTANCE_OFFLINE,
2907
                         msg="can't change to offline")
2908

    
2909
  @staticmethod
2910
  def _InstanceCommunicationDDM(cfg, instance_communication, instance):
2911
    """Create a NIC mod that adds or removes the instance
2912
    communication NIC to a running instance.
2913

2914
    The NICS are dynamically created using the Dynamic Device
2915
    Modification (DDM).  This function produces a NIC modification
2916
    (mod) that inserts an additional NIC meant for instance
2917
    communication in or removes an existing instance communication NIC
2918
    from a running instance, using DDM.
2919

2920
    @type cfg: L{config.ConfigWriter}
2921
    @param cfg: cluster configuration
2922

2923
    @type instance_communication: boolean
2924
    @param instance_communication: whether instance communication is
2925
                                   enabled or disabled
2926

2927
    @type instance: L{objects.Instance}
2928
    @param instance: instance to which the NIC mod will be applied to
2929

2930
    @rtype: (L{constants.DDM_ADD}, -1, parameters) or
2931
            (L{constants.DDM_REMOVE}, -1, parameters) or
2932
            L{None}
2933
    @return: DDM mod containing an action to add or remove the NIC, or
2934
             None if nothing needs to be done
2935

2936
    """
2937
    nic_name = _ComputeInstanceCommunicationNIC(instance.name)
2938

    
2939
    instance_communication_nic = None
2940

    
2941
    for nic in instance.nics:
2942
      if nic.name == nic_name:
2943
        instance_communication_nic = nic
2944
        break
2945

    
2946
    if instance_communication and not instance_communication_nic:
2947
      action = constants.DDM_ADD
2948
      params = {constants.INIC_NAME: nic_name,
2949
                constants.INIC_MAC: constants.VALUE_GENERATE,
2950
                constants.INIC_IP: constants.NIC_IP_POOL,
2951
                constants.INIC_NETWORK:
2952
                  cfg.GetInstanceCommunicationNetwork()}
2953
    elif not instance_communication and instance_communication_nic:
2954
      action = constants.DDM_REMOVE
2955
      params = None
2956
    else:
2957
      action = None
2958
      params = None
2959

    
2960
    if action is not None:
2961
      return (action, -1, params)
2962
    else:
2963
      return None
2964
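  # Illustrative sketch: enabling instance communication on an instance that
  # lacks the special NIC yields a mod along the lines of
  #   (constants.DDM_ADD, -1, {constants.INIC_NAME: <computed name>,
  #                            constants.INIC_MAC: constants.VALUE_GENERATE,
  #                            constants.INIC_IP: constants.NIC_IP_POOL,
  #                            constants.INIC_NETWORK: <cluster-wide network>})
  # which CheckPrereq appends to self.op.nics; disabling it while the NIC
  # exists yields (constants.DDM_REMOVE, -1, None) instead.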

    
2965
  def CheckPrereq(self):
2966
    """Check prerequisites.
2967

2968
    This only checks the instance list against the existing names.
2969

2970
    """
2971
    assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
2972
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
2973
    self.cluster = self.cfg.GetClusterInfo()
2974
    cluster_hvparams = self.cluster.hvparams[self.instance.hypervisor]
2975

    
2976
    assert self.instance is not None, \
2977
      "Cannot retrieve locked instance %s" % self.op.instance_name
2978

    
2979
    pnode_uuid = self.instance.primary_node
2980

    
2981
    self.warn = []
2982

    
2983
    if (self.op.pnode_uuid is not None and self.op.pnode_uuid != pnode_uuid and
2984
        not self.op.force):
2985
      # verify that the instance is not up
2986
      instance_info = self.rpc.call_instance_info(
2987
          pnode_uuid, self.instance.name, self.instance.hypervisor,
2988
          cluster_hvparams)
2989
      if instance_info.fail_msg:
2990
        self.warn.append("Can't get instance runtime information: %s" %
2991
                         instance_info.fail_msg)
2992
      elif instance_info.payload:
2993
        raise errors.OpPrereqError("Instance is still running on %s" %
2994
                                   self.cfg.GetNodeName(pnode_uuid),
2995
                                   errors.ECODE_STATE)
2996

    
2997
    assert pnode_uuid in self.owned_locks(locking.LEVEL_NODE)
2998
    node_uuids = list(self.instance.all_nodes)
2999
    pnode_info = self.cfg.GetNodeInfo(pnode_uuid)
3000

    
3001
    #_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
3002
    assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
3003
    group_info = self.cfg.GetNodeGroup(pnode_info.group)
3004

    
3005
    # dictionary with instance information after the modification
3006
    ispec = {}
3007

    
3008
    if self.op.hotplug or self.op.hotplug_if_possible:
3009
      result = self.rpc.call_hotplug_supported(self.instance.primary_node,
3010
                                               self.instance)
3011
      if result.fail_msg:
3012
        if self.op.hotplug:
3013
          result.Raise("Hotplug is not possible: %s" % result.fail_msg,
3014
                       prereq=True)
3015
        else:
3016
          self.LogWarning(result.fail_msg)
3017
          self.op.hotplug = False
3018
          self.LogInfo("Modification will take place without hotplugging.")
3019
      else:
3020
        self.op.hotplug = True
3021

    
3022
    # Prepare NIC modifications
3023
    # add or remove NIC for instance communication
3024
    if self.op.instance_communication is not None:
3025
      mod = self._InstanceCommunicationDDM(self.cfg,
3026
                                           self.op.instance_communication,
3027
                                           self.instance)
3028
      if mod is not None:
3029
        self.op.nics.append(mod)
3030

    
3031
    self.nicmod = _PrepareContainerMods(self.op.nics, _InstNicModPrivate)
3032

    
3033
    # OS change
3034
    if self.op.os_name and not self.op.force:
3035
      CheckNodeHasOS(self, self.instance.primary_node, self.op.os_name,
3036
                     self.op.force_variant)
3037
      instance_os = self.op.os_name
3038
    else:
3039
      instance_os = self.instance.os
3040

    
3041
    assert not (self.op.disk_template and self.op.disks), \
3042
      "Can't modify disk template and apply disk changes at the same time"
3043

    
3044
    if self.op.disk_template:
3045
      self._PreCheckDiskTemplate(pnode_info)
3046

    
3047
    self._PreCheckDisks(ispec)
3048

    
3049
    # hvparams processing
3050
    if self.op.hvparams:
3051
      hv_type = self.instance.hypervisor
3052
      i_hvdict = GetUpdatedParams(self.instance.hvparams, self.op.hvparams)
3053
      utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
3054
      hv_new = self.cluster.SimpleFillHV(hv_type, self.instance.os, i_hvdict)
3055

    
3056
      # local check
3057
      hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
3058
      CheckHVParams(self, node_uuids, self.instance.hypervisor, hv_new)
3059
      self.hv_proposed = self.hv_new = hv_new # the new actual values
3060
      self.hv_inst = i_hvdict # the new dict (without defaults)
3061
    else:
3062
      self.hv_proposed = self.cluster.SimpleFillHV(self.instance.hypervisor,
3063
                                                   self.instance.os,
3064
                                                   self.instance.hvparams)
3065
      self.hv_new = self.hv_inst = {}
3066

    
3067
    # beparams processing
3068
    if self.op.beparams:
3069
      i_bedict = GetUpdatedParams(self.instance.beparams, self.op.beparams,
3070
                                  use_none=True)
3071
      objects.UpgradeBeParams(i_bedict)
3072
      utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
3073
      be_new = self.cluster.SimpleFillBE(i_bedict)
3074
      self.be_proposed = self.be_new = be_new # the new actual values
3075
      self.be_inst = i_bedict # the new dict (without defaults)
3076
    else:
3077
      self.be_new = self.be_inst = {}
3078
      self.be_proposed = self.cluster.SimpleFillBE(self.instance.beparams)
3079
    be_old = self.cluster.FillBE(self.instance)
3080

    
3081
    # CPU param validation -- checking every time a parameter is
3082
    # changed to cover all cases where either CPU mask or vcpus have
3083
    # changed
3084
    if (constants.BE_VCPUS in self.be_proposed and
3085
        constants.HV_CPU_MASK in self.hv_proposed):
3086
      cpu_list = \
3087
        utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
3088
      # Verify mask is consistent with number of vCPUs. Can skip this
3089
      # test if only 1 entry in the CPU mask, which means same mask
3090
      # is applied to all vCPUs.
3091
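      # Illustrative example: a multi-entry mask such as "0-1:3:4" (syntax as
      # assumed for utils.ParseMultiCpuMask) yields one entry per vCPU and is
      # therefore only consistent with BE_VCPUS == 3, while a single-entry
      # mask like "0-3" applies to all vCPUs and skips this check.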
      if (len(cpu_list) > 1 and
3092
          len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
3093
        raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
3094
                                   " CPU mask [%s]" %
3095
                                   (self.be_proposed[constants.BE_VCPUS],
3096
                                    self.hv_proposed[constants.HV_CPU_MASK]),
3097
                                   errors.ECODE_INVAL)
3098

    
3099
      # Only perform this test if a new CPU mask is given
3100
      if constants.HV_CPU_MASK in self.hv_new:
3101
        # Calculate the largest CPU number requested
3102
        max_requested_cpu = max(map(max, cpu_list))
3103
        # Check that all of the instance's nodes have enough physical CPUs to
3104
        # satisfy the requested CPU mask
3105
        hvspecs = [(self.instance.hypervisor,
3106
                    self.cfg.GetClusterInfo()
3107
                      .hvparams[self.instance.hypervisor])]
3108
        _CheckNodesPhysicalCPUs(self, self.instance.all_nodes,
3109
                                max_requested_cpu + 1,
3110
                                hvspecs)
3111

    
3112
    # osparams processing
3113
    if self.op.osparams or self.op.osparams_private:
3114
      public_parms = self.op.osparams or {}
3115
      private_parms = self.op.osparams_private or {}
3116
      dupe_keys = utils.GetRepeatedKeys(public_parms, private_parms)
3117

    
3118
      if dupe_keys:
3119
        raise errors.OpPrereqError("OS parameters repeated multiple times: %s" %
3120
                                   utils.CommaJoin(dupe_keys))
3121

    
3122
      self.os_inst = GetUpdatedParams(self.instance.osparams,
3123
                                      public_parms)
3124
      self.os_inst_private = GetUpdatedParams(self.instance.osparams_private,
3125
                                              private_parms)
3126

    
3127
      CheckOSParams(self, True, node_uuids, instance_os,
3128
                    objects.FillDict(self.os_inst,
3129
                                     self.os_inst_private))
3130

    
3131
    else:
3132
      self.os_inst = {}
3133
      self.os_inst_private = {}
3134

    
3135
    #TODO(dynmem): do the appropriate check involving MINMEM
3136
    if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
3137
        be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
3138
      mem_check_list = [pnode_uuid]
3139
      if be_new[constants.BE_AUTO_BALANCE]:
3140
        # either we changed auto_balance to yes or it was from before
3141
        mem_check_list.extend(self.instance.secondary_nodes)
3142
      instance_info = self.rpc.call_instance_info(
3143
          pnode_uuid, self.instance.name, self.instance.hypervisor,
3144
          cluster_hvparams)
3145
      hvspecs = [(self.instance.hypervisor,
3146
                  cluster_hvparams)]
3147
      nodeinfo = self.rpc.call_node_info(mem_check_list, None,
3148
                                         hvspecs)
3149
      pninfo = nodeinfo[pnode_uuid]
3150
      msg = pninfo.fail_msg
3151
      if msg:
3152
        # Assume the primary node is unreachable and go ahead
3153
        self.warn.append("Can't get info from primary node %s: %s" %
3154
                         (self.cfg.GetNodeName(pnode_uuid), msg))
3155
      else:
3156
        (_, _, (pnhvinfo, )) = pninfo.payload
3157
        if not isinstance(pnhvinfo.get("memory_free", None), int):
3158
          self.warn.append("Node data from primary node %s doesn't contain"
3159
                           " free memory information" %
3160
                           self.cfg.GetNodeName(pnode_uuid))
3161
        elif instance_info.fail_msg:
3162
          self.warn.append("Can't get instance runtime information: %s" %
3163
                           instance_info.fail_msg)
3164
        else:
3165
          if instance_info.payload:
3166
            current_mem = int(instance_info.payload["memory"])
3167
          else:
3168
            # Assume instance not running
3169
            # (there is a slight race condition here, but it's not very
3170
            # probable, and we have no other way to check)
3171
            # TODO: Describe race condition
3172
            current_mem = 0
3173
          #TODO(dynmem): do the appropriate check involving MINMEM
3174
          miss_mem = (be_new[constants.BE_MAXMEM] - current_mem -
3175
                      pnhvinfo["memory_free"])
3176
          if miss_mem > 0:
3177
            raise errors.OpPrereqError("This change will prevent the instance"
3178
                                       " from starting, due to %d MB of memory"
3179
                                       " missing on its primary node" %
3180
                                       miss_mem, errors.ECODE_NORES)
3181

    
3182
      if be_new[constants.BE_AUTO_BALANCE]:
3183
        for node_uuid, nres in nodeinfo.items():
3184
          if node_uuid not in self.instance.secondary_nodes:
3185
            continue
3186
          nres.Raise("Can't get info from secondary node %s" %
3187
                     self.cfg.GetNodeName(node_uuid), prereq=True,
3188
                     ecode=errors.ECODE_STATE)
3189
          (_, _, (nhvinfo, )) = nres.payload
3190
          if not isinstance(nhvinfo.get("memory_free", None), int):
3191
            raise errors.OpPrereqError("Secondary node %s didn't return free"
3192
                                       " memory information" %
3193
                                       self.cfg.GetNodeName(node_uuid),
3194
                                       errors.ECODE_STATE)
3195
          #TODO(dynmem): do the appropriate check involving MINMEM
3196
          elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
3197
            raise errors.OpPrereqError("This change will prevent the instance"
3198
                                       " from failover to its secondary node"
3199
                                       " %s, due to not enough memory" %
3200
                                       self.cfg.GetNodeName(node_uuid),
3201
                                       errors.ECODE_STATE)
3202

    
3203
    if self.op.runtime_mem:
3204
      remote_info = self.rpc.call_instance_info(
3205
         self.instance.primary_node, self.instance.name,
3206
         self.instance.hypervisor,
3207
         cluster_hvparams)
3208
      remote_info.Raise("Error checking node %s" %
3209
                        self.cfg.GetNodeName(self.instance.primary_node))
3210
      if not remote_info.payload: # not running already
3211
        raise errors.OpPrereqError("Instance %s is not running" %
3212
                                   self.instance.name, errors.ECODE_STATE)
3213

    
3214
      current_memory = remote_info.payload["memory"]
3215
      if (not self.op.force and
3216
           (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
3217
            self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
3218
        raise errors.OpPrereqError("Instance %s must have memory between %d"
3219
                                   " and %d MB of memory unless --force is"
3220
                                   " given" %
3221
                                   (self.instance.name,
3222
                                    self.be_proposed[constants.BE_MINMEM],
3223
                                    self.be_proposed[constants.BE_MAXMEM]),
3224
                                   errors.ECODE_INVAL)
3225

    
3226
      delta = self.op.runtime_mem - current_memory
3227
      if delta > 0:
3228
        CheckNodeFreeMemory(
3229
            self, self.instance.primary_node,
3230
            "ballooning memory for instance %s" % self.instance.name, delta,
3231
            self.instance.hypervisor,
3232
            self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])
3233

    
3234
    # make self.cluster visible in the functions below
3235
    cluster = self.cluster
3236

    
3237
    def _PrepareNicCreate(_, params, private):
3238
      self._PrepareNicModification(params, private, None, None,
3239
                                   {}, cluster, pnode_uuid)
3240
      return (None, None)
3241

    
3242
    def _PrepareNicMod(_, nic, params, private):
3243
      self._PrepareNicModification(params, private, nic.ip, nic.network,
3244
                                   nic.nicparams, cluster, pnode_uuid)
3245
      return None
3246

    
3247
    def _PrepareNicRemove(_, params, __):
3248
      ip = params.ip
3249
      net = params.network
3250
      if net is not None and ip is not None:
3251
        self.cfg.ReleaseIp(net, ip, self.proc.GetECId())
3252

    
3253
    # Verify NIC changes (operating on copy)
3254
    nics = [nic.Copy() for nic in self.instance.nics]
3255
    _ApplyContainerMods("NIC", nics, None, self.nicmod,
3256
                        _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
3257
    if len(nics) > constants.MAX_NICS:
3258
      raise errors.OpPrereqError("Instance has too many network interfaces"
3259
                                 " (%d), cannot add more" % constants.MAX_NICS,
3260
                                 errors.ECODE_STATE)
3261

    
3262
    # Pre-compute NIC changes (necessary to use result in hooks)
3263
    self._nic_chgdesc = []
3264
    if self.nicmod:
3265
      # Operate on copies as this is still in prereq
3266
      nics = [nic.Copy() for nic in self.instance.nics]
3267
      _ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
3268
                          self._CreateNewNic, self._ApplyNicMods,
3269
                          self._RemoveNic)
3270
      # Verify that NIC names are unique and valid
3271
      utils.ValidateDeviceNames("NIC", nics)
3272
      self._new_nics = nics
3273
      ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
3274
    else:
3275
      self._new_nics = None
3276
      ispec[constants.ISPEC_NIC_COUNT] = len(self.instance.nics)
3277

    
3278
    if not self.op.ignore_ipolicy:
3279
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
3280
                                                              group_info)
3281

    
3282
      # Fill ispec with backend parameters
3283
      ispec[constants.ISPEC_SPINDLE_USE] = \
3284
        self.be_new.get(constants.BE_SPINDLE_USE, None)
3285
      ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS,
3286
                                                         None)
3287

    
3288
      # Copy ispec to verify parameters with min/max values separately
3289
      if self.op.disk_template:
3290
        new_disk_template = self.op.disk_template
3291
      else:
3292
        new_disk_template = self.instance.disk_template
3293
      ispec_max = ispec.copy()
3294
      ispec_max[constants.ISPEC_MEM_SIZE] = \
3295
        self.be_new.get(constants.BE_MAXMEM, None)
3296
      res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,
3297
                                                     new_disk_template)
3298
      ispec_min = ispec.copy()
3299
      ispec_min[constants.ISPEC_MEM_SIZE] = \
3300
        self.be_new.get(constants.BE_MINMEM, None)
3301
      res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,
3302
                                                     new_disk_template)
3303

    
3304
      if res_max or res_min:
3305
        # FIXME: Improve error message by including information about whether
3306
        # the upper or lower limit of the parameter fails the ipolicy.
3307
        msg = ("Instance allocation to group %s (%s) violates policy: %s" %
3308
               (group_info, group_info.name,
3309
                utils.CommaJoin(set(res_max + res_min))))
3310
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
3311

    
3312
  def _ConvertPlainToDrbd(self, feedback_fn):
3313
    """Converts an instance from plain to drbd.
3314

3315
    """
3316
    feedback_fn("Converting template to drbd")
3317
    pnode_uuid = self.instance.primary_node
3318
    snode_uuid = self.op.remote_node_uuid
3319

    
3320
    assert self.instance.disk_template == constants.DT_PLAIN
3321

    
3322
    # create a fake disk info for _GenerateDiskTemplate
3323
    disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
3324
                  constants.IDISK_VG: d.logical_id[0],
3325
                  constants.IDISK_NAME: d.name}
3326
                 for d in self.instance.disks]
3327
    new_disks = GenerateDiskTemplate(self, self.op.disk_template,
3328
                                     self.instance.uuid, pnode_uuid,
3329
                                     [snode_uuid], disk_info, None, None, 0,
3330
                                     feedback_fn, self.diskparams)
3331
    anno_disks = rpc.AnnotateDiskParams(new_disks, self.diskparams)
3332
    p_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, pnode_uuid)
3333
    s_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, snode_uuid)
3334
    info = GetInstanceInfoText(self.instance)
3335
    feedback_fn("Creating additional volumes...")
3336
    # first, create the missing data and meta devices
3337
    for disk in anno_disks:
3338
      # unfortunately this is... not too nice
3339
      CreateSingleBlockDev(self, pnode_uuid, self.instance, disk.children[1],
3340
                           info, True, p_excl_stor)
3341
      for child in disk.children:
3342
        CreateSingleBlockDev(self, snode_uuid, self.instance, child, info, True,
3343
                             s_excl_stor)
3344
    # at this stage, all new LVs have been created, we can rename the
3345
    # old ones
3346
    feedback_fn("Renaming original volumes...")
3347
    rename_list = [(o, n.children[0].logical_id)
3348
                   for (o, n) in zip(self.instance.disks, new_disks)]
3349
    result = self.rpc.call_blockdev_rename(pnode_uuid, rename_list)
3350
    result.Raise("Failed to rename original LVs")
3351

    
3352
    feedback_fn("Initializing DRBD devices...")
3353
    # all child devices are in place, we can now create the DRBD devices
3354
    try:
3355
      for disk in anno_disks:
3356
        for (node_uuid, excl_stor) in [(pnode_uuid, p_excl_stor),
3357
                                       (snode_uuid, s_excl_stor)]:
3358
          f_create = node_uuid == pnode_uuid
3359
          CreateSingleBlockDev(self, node_uuid, self.instance, disk, info,
3360
                               f_create, excl_stor)
3361
    except errors.GenericError, e:
3362
      feedback_fn("Initializing of DRBD devices failed;"
3363
                  " renaming back original volumes...")
3364
      rename_back_list = [(n.children[0], o.logical_id)
3365
                          for (n, o) in zip(new_disks, self.instance.disks)]
3366
      result = self.rpc.call_blockdev_rename(pnode_uuid, rename_back_list)
3367
      result.Raise("Failed to rename LVs back after error %s" % str(e))
3368
      raise
3369

    
3370
    # at this point, the instance has been modified
3371
    self.instance.disk_template = constants.DT_DRBD8
3372
    self.instance.disks = new_disks
3373
    self.cfg.Update(self.instance, feedback_fn)
3374

    
3375
    # Release node locks while waiting for sync
3376
    ReleaseLocks(self, locking.LEVEL_NODE)
3377

    
3378
    # disks are created, waiting for sync
3379
    disk_abort = not WaitForSync(self, self.instance,
3380
                                 oneshot=not self.op.wait_for_sync)
3381
    if disk_abort:
3382
      raise errors.OpExecError("There are some degraded disks for"
3383
                               " this instance, please cleanup manually")
3384

    
3385
    # Node resource locks will be released by caller
3386

    
3387
  def _ConvertDrbdToPlain(self, feedback_fn):
3388
    """Converts an instance from drbd to plain.
3389

3390
    """
3391
    assert len(self.instance.secondary_nodes) == 1
3392
    assert self.instance.disk_template == constants.DT_DRBD8
3393

    
3394
    pnode_uuid = self.instance.primary_node
3395
    snode_uuid = self.instance.secondary_nodes[0]
3396
    feedback_fn("Converting template to plain")
3397

    
3398
    old_disks = AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
3399
    new_disks = [d.children[0] for d in self.instance.disks]
3400

    
3401
    # copy over size, mode and name
3402
    for parent, child in zip(old_disks, new_disks):
3403
      child.size = parent.size
3404
      child.mode = parent.mode
3405
      child.name = parent.name
3406

    
3407
    # this is a DRBD disk, return its port to the pool
3408
    # NOTE: this must be done right before the call to cfg.Update!
3409
    for disk in old_disks:
3410
      tcp_port = disk.logical_id[2]
3411
      self.cfg.AddTcpUdpPort(tcp_port)
3412

    
3413
    # update instance structure
3414
    self.instance.disks = new_disks
3415
    self.instance.disk_template = constants.DT_PLAIN
3416
    _UpdateIvNames(0, self.instance.disks)
3417
    self.cfg.Update(self.instance, feedback_fn)
3418

    
3419
    # Release locks in case removing disks takes a while
3420
    ReleaseLocks(self, locking.LEVEL_NODE)
3421

    
3422
    feedback_fn("Removing volumes on the secondary node...")
3423
    for disk in old_disks:
3424
      result = self.rpc.call_blockdev_remove(snode_uuid, (disk, self.instance))
3425
      result.Warn("Could not remove block device %s on node %s,"
3426
                  " continuing anyway" %
3427
                  (disk.iv_name, self.cfg.GetNodeName(snode_uuid)),
3428
                  self.LogWarning)
3429

    
3430
    feedback_fn("Removing unneeded volumes on the primary node...")
3431
    for idx, disk in enumerate(old_disks):
3432
      meta = disk.children[1]
3433
      result = self.rpc.call_blockdev_remove(pnode_uuid, (meta, self.instance))
3434
      result.Warn("Could not remove metadata for disk %d on node %s,"
3435
                  " continuing anyway" %
3436
                  (idx, self.cfg.GetNodeName(pnode_uuid)),
3437
                  self.LogWarning)
3438

    
3439
  def _HotplugDevice(self, action, dev_type, device, extra, seq):
3440
    self.LogInfo("Trying to hotplug device...")
3441
    msg = "hotplug:"
3442
    result = self.rpc.call_hotplug_device(self.instance.primary_node,
3443
                                          self.instance, action, dev_type,
3444
                                          (device, self.instance),
3445
                                          extra, seq)
3446
    if result.fail_msg:
3447
      self.LogWarning("Could not hotplug device: %s" % result.fail_msg)
3448
      self.LogInfo("Continuing execution..")
3449
      msg += "failed"
3450
    else:
3451
      self.LogInfo("Hotplug done.")
3452
      msg += "done"
3453
    return msg
3454

    
3455
  def _CreateNewDisk(self, idx, params, _):
3456
    """Creates a new disk.
3457

3458
    """
3459
    # add a new disk
3460
    if self.instance.disk_template in constants.DTS_FILEBASED:
3461
      (file_driver, file_path) = self.instance.disks[0].logical_id
3462
      file_path = os.path.dirname(file_path)
3463
    else:
3464
      file_driver = file_path = None
3465

    
3466
    disk = \
3467
      GenerateDiskTemplate(self, self.instance.disk_template,
3468
                           self.instance.uuid, self.instance.primary_node,
3469
                           self.instance.secondary_nodes, [params], file_path,
3470
                           file_driver, idx, self.Log, self.diskparams)[0]
3471

    
3472
    new_disks = CreateDisks(self, self.instance, disks=[disk])
3473

    
3474
    if self.cluster.prealloc_wipe_disks:
3475
      # Wipe new disk
3476
      WipeOrCleanupDisks(self, self.instance,
3477
                         disks=[(idx, disk, 0)],
3478
                         cleanup=new_disks)
3479

    
3480
    changes = [
3481
      ("disk/%d" % idx,
3482
       "add:size=%s,mode=%s" % (disk.size, disk.mode)),
3483
      ]
3484
    if self.op.hotplug:
3485
      result = self.rpc.call_blockdev_assemble(self.instance.primary_node,
3486
                                               (disk, self.instance),
3487
                                               self.instance.name, True, idx)
3488
      if result.fail_msg:
3489
        changes.append(("disk/%d" % idx, "assemble:failed"))
3490
        self.LogWarning("Can't assemble newly created disk %d: %s",
3491
                        idx, result.fail_msg)
3492
      else:
3493
        _, link_name = result.payload
3494
        msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
3495
                                  constants.HOTPLUG_TARGET_DISK,
3496
                                  disk, link_name, idx)
3497
        changes.append(("disk/%d" % idx, msg))
3498

    
3499
    return (disk, changes)
3500

    
3501
  def _PostAddDisk(self, _, disk):
3502
    if not WaitForSync(self, self.instance, disks=[disk],
3503
                       oneshot=not self.op.wait_for_sync):
3504
      raise errors.OpExecError("Failed to sync disks of %s" %
3505
                               self.instance.name)
3506

    
3507
    # the disk is active at this point, so deactivate it if the instance disks
3508
    # are supposed to be inactive
3509
    if not self.instance.disks_active:
3510
      ShutdownInstanceDisks(self, self.instance, disks=[disk])
3511

    
3512
  def _ModifyDisk(self, idx, disk, params, _):
3513
    """Modifies a disk.
3514

3515
    """
3516
    changes = []
3517
    if constants.IDISK_MODE in params:
3518
      disk.mode = params.get(constants.IDISK_MODE)
3519
      changes.append(("disk.mode/%d" % idx, disk.mode))
3520

    
3521
    if constants.IDISK_NAME in params:
3522
      disk.name = params.get(constants.IDISK_NAME)
3523
      changes.append(("disk.name/%d" % idx, disk.name))
3524

    
3525
    # Modify arbitrary params in case instance template is ext
3526
    for key, value in params.iteritems():
3527
      if (key not in constants.MODIFIABLE_IDISK_PARAMS and
3528
          self.instance.disk_template == constants.DT_EXT):
3529
        # stolen from GetUpdatedParams: default means reset/delete
3530
        if value.lower() == constants.VALUE_DEFAULT:
3531
          try:
3532
            del disk.params[key]
3533
          except KeyError:
3534
            pass
3535
        else:
3536
          disk.params[key] = value
3537
        changes.append(("disk.params:%s/%d" % (key, idx), value))
3538

    
3539
    return changes
3540

    
3541
  def _RemoveDisk(self, idx, root, _):
3542
    """Removes a disk.
3543

3544
    """
3545
    hotmsg = ""
3546
    if self.op.hotplug:
3547
      hotmsg = self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
3548
                                   constants.HOTPLUG_TARGET_DISK,
3549
                                   root, None, idx)
3550
      ShutdownInstanceDisks(self, self.instance, [root])
3551

    
3552
    (anno_disk,) = AnnotateDiskParams(self.instance, [root], self.cfg)
3553
    for node_uuid, disk in anno_disk.ComputeNodeTree(
3554
                             self.instance.primary_node):
3555
      msg = self.rpc.call_blockdev_remove(node_uuid, (disk, self.instance)) \
3556
              .fail_msg
3557
      if msg:
3558
        self.LogWarning("Could not remove disk/%d on node '%s': %s,"
3559
                        " continuing anyway", idx,
3560
                        self.cfg.GetNodeName(node_uuid), msg)
3561

    
3562
    # if this is a DRBD disk, return its port to the pool
3563
    if root.dev_type in constants.DTS_DRBD:
3564
      self.cfg.AddTcpUdpPort(root.logical_id[2])
3565

    
3566
    return hotmsg
3567

    
3568
  def _CreateNewNic(self, idx, params, private):
3569
    """Creates data structure for a new network interface.
3570

3571
    """
3572
    mac = params[constants.INIC_MAC]
3573
    ip = params.get(constants.INIC_IP, None)
3574
    net = params.get(constants.INIC_NETWORK, None)
3575
    name = params.get(constants.INIC_NAME, None)
3576
    net_uuid = self.cfg.LookupNetwork(net)
3577
    #TODO: not private.filled?? can a nic have no nicparams??
3578
    nicparams = private.filled
3579
    nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name,
3580
                       nicparams=nicparams)
3581
    nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
3582

    
3583
    changes = [
3584
      ("nic.%d" % idx,
3585
       "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
3586
       (mac, ip, private.filled[constants.NIC_MODE],
3587
       private.filled[constants.NIC_LINK], net)),
3588
      ]
3589

    
3590
    if self.op.hotplug:
3591
      msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
3592
                                constants.HOTPLUG_TARGET_NIC,
3593
                                nobj, None, idx)
3594
      changes.append(("nic.%d" % idx, msg))
3595

    
3596
    return (nobj, changes)
3597

    
3598
  def _ApplyNicMods(self, idx, nic, params, private):
3599
    """Modifies a network interface.
3600

3601
    """
3602
    changes = []
3603

    
3604
    for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:
3605
      if key in params:
3606
        changes.append(("nic.%s/%d" % (key, idx), params[key]))
3607
        setattr(nic, key, params[key])
3608

    
3609
    new_net = params.get(constants.INIC_NETWORK, nic.network)
3610
    new_net_uuid = self.cfg.LookupNetwork(new_net)
3611
    if new_net_uuid != nic.network:
3612
      changes.append(("nic.network/%d" % idx, new_net))
3613
      nic.network = new_net_uuid
3614

    
3615
    if private.filled:
3616
      nic.nicparams = private.filled
3617

    
3618
      for (key, val) in nic.nicparams.items():
3619
        changes.append(("nic.%s/%d" % (key, idx), val))
3620

    
3621
    if self.op.hotplug:
3622
      msg = self._HotplugDevice(constants.HOTPLUG_ACTION_MODIFY,
3623
                                constants.HOTPLUG_TARGET_NIC,
3624
                                nic, None, idx)
3625
      changes.append(("nic/%d" % idx, msg))
3626

    
3627
    return changes
3628

    
3629
  def _RemoveNic(self, idx, nic, _):
3630
    if self.op.hotplug:
      return self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
                                 constants.HOTPLUG_TARGET_NIC,
                                 nic, None, idx)

  def Exec(self, feedback_fn):
    """Modifies an instance.

    All parameters take effect only at the next restart of the instance.

    """
    # Process here the warnings from CheckPrereq, as we don't have a
    # feedback_fn there.
    # TODO: Replace with self.LogWarning
    for warn in self.warn:
      feedback_fn("WARNING: %s" % warn)

    assert ((self.op.disk_template is None) ^
            bool(self.owned_locks(locking.LEVEL_NODE_RES))), \
      "Not owning any node resource locks"

    result = []

    # New primary node
    if self.op.pnode_uuid:
      self.instance.primary_node = self.op.pnode_uuid

    # runtime memory
    if self.op.runtime_mem:
      rpcres = self.rpc.call_instance_balloon_memory(self.instance.primary_node,
                                                     self.instance,
                                                     self.op.runtime_mem)
      rpcres.Raise("Cannot modify instance runtime memory")
      result.append(("runtime_memory", self.op.runtime_mem))

    # Apply disk changes
    _ApplyContainerMods("disk", self.instance.disks, result, self.diskmod,
                        self._CreateNewDisk, self._ModifyDisk,
                        self._RemoveDisk, post_add_fn=self._PostAddDisk)
    _UpdateIvNames(0, self.instance.disks)

    if self.op.disk_template:
      if __debug__:
        check_nodes = set(self.instance.all_nodes)
        if self.op.remote_node_uuid:
          check_nodes.add(self.op.remote_node_uuid)
        for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
          owned = self.owned_locks(level)
          assert not (check_nodes - owned), \
            ("Not owning the correct locks, owning %r, expected at least %r" %
             (owned, check_nodes))

      r_shut = ShutdownInstanceDisks(self, self.instance)
      if not r_shut:
        raise errors.OpExecError("Cannot shutdown instance disks, unable to"
                                 " proceed with disk template conversion")
      mode = (self.instance.disk_template, self.op.disk_template)
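      # Run the conversion routine for this (old, new) template pair; if it
      # fails, release any DRBD minors reserved for the instance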
      try:
        self._DISK_CONVERSIONS[mode](self, feedback_fn)
      except:
        self.cfg.ReleaseDRBDMinors(self.instance.uuid)
        raise
      result.append(("disk_template", self.op.disk_template))

      assert self.instance.disk_template == self.op.disk_template, \
        ("Expected disk template '%s', found '%s'" %
         (self.op.disk_template, self.instance.disk_template))

    # Release node and resource locks if there are any (they might already have
    # been released during disk conversion)
    ReleaseLocks(self, locking.LEVEL_NODE)
    ReleaseLocks(self, locking.LEVEL_NODE_RES)

    # Apply NIC changes
    if self._new_nics is not None:
      self.instance.nics = self._new_nics
      result.extend(self._nic_chgdesc)

    # hvparams changes
    if self.op.hvparams:
      self.instance.hvparams = self.hv_inst
      for key, val in self.op.hvparams.iteritems():
        result.append(("hv/%s" % key, val))

    # beparams changes
    if self.op.beparams:
      self.instance.beparams = self.be_inst
      for key, val in self.op.beparams.iteritems():
        result.append(("be/%s" % key, val))

    # OS change
    if self.op.os_name:
      self.instance.os = self.op.os_name

    # osparams changes
    if self.op.osparams:
      self.instance.osparams = self.os_inst
      for key, val in self.op.osparams.iteritems():
        result.append(("os/%s" % key, val))

    if self.op.osparams_private:
      self.instance.osparams_private = self.os_inst_private
      for key, val in self.op.osparams_private.iteritems():
        # Show the Private(...) blurb.
        result.append(("os_private/%s" % key, repr(val)))

    if self.op.offline is None:
      # Ignore
      pass
    elif self.op.offline:
      # Mark instance as offline
      self.cfg.MarkInstanceOffline(self.instance.uuid)
      result.append(("admin_state", constants.ADMINST_OFFLINE))
    else:
      # Mark instance as online, but stopped
      self.cfg.MarkInstanceDown(self.instance.uuid)
      result.append(("admin_state", constants.ADMINST_DOWN))

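    # Persist all the changes made above in the cluster configuration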
    self.cfg.Update(self.instance, feedback_fn, self.proc.GetECId())

    assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
                self.owned_locks(locking.LEVEL_NODE)), \
      "All node locks should have been released by now"

    return result

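  # Map of supported (current template, requested template) conversions to
  # the methods implementing them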
  _DISK_CONVERSIONS = {
    (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
    (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
    }


class LUInstanceChangeGroup(LogicalUnit):
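  """Change the node group of an instance.

  """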
  HPATH = "instance-change-group"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
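    """Expand the instance name and declare the locks needed for the change.

    """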
    self.share_locks = ShareAll()

    self.needed_locks = {
      locking.LEVEL_NODEGROUP: [],
      locking.LEVEL_NODE: [],
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
      }

    self._ExpandAndLockInstance()

    if self.op.target_groups:
      self.req_target_uuids = map(self.cfg.LookupNodeGroup,
                                  self.op.target_groups)
    else:
      self.req_target_uuids = None

    self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)

  def DeclareLocks(self, level):
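    """Compute the node group and node locks, depending on the locking level.

    """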
    if level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]

      if self.req_target_uuids:
        lock_groups = set(self.req_target_uuids)

        # Lock all groups used by instance optimistically; this requires going
        # via the node before it's locked, requiring verification later on
        instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
        lock_groups.update(instance_groups)
      else:
        # No target groups, need to lock all of them
        lock_groups = locking.ALL_SET

      self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups

    elif level == locking.LEVEL_NODE:
      if self.req_target_uuids:
        # Lock all nodes used by instances
        self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
        self._LockInstancesNodes()

        # Lock all nodes in all potential target groups
        lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
                       self.cfg.GetInstanceNodeGroups(self.op.instance_uuid))
        member_nodes = [node_uuid
                        for group in lock_groups
                        for node_uuid in self.cfg.GetNodeGroup(group).members]
        self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
      else:
        # Lock all nodes as all groups are potential targets
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
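    """Check prerequisites.

    This verifies that the locks held still match the instance's nodes and
    groups, and computes the set of valid target groups.

    """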
    owned_instance_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))

    assert (self.req_target_uuids is None or
            owned_groups.issuperset(self.req_target_uuids))
    assert owned_instance_names == set([self.op.instance_name])

    # Get instance information
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)

    # Check if node groups for locked instance are still correct
    assert owned_nodes.issuperset(self.instance.all_nodes), \
      ("Instance %s's nodes changed while we kept the lock" %
       self.op.instance_name)

    inst_groups = CheckInstanceNodeGroups(self.cfg, self.op.instance_uuid,
                                          owned_groups)

    if self.req_target_uuids:
      # User requested specific target groups
      self.target_uuids = frozenset(self.req_target_uuids)
    else:
      # All groups except those used by the instance are potential targets
      self.target_uuids = owned_groups - inst_groups

    conflicting_groups = self.target_uuids & inst_groups
    if conflicting_groups:
      raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
                                 " used by the instance '%s'" %
                                 (utils.CommaJoin(conflicting_groups),
                                  self.op.instance_name),
                                 errors.ECODE_INVAL)

    if not self.target_uuids:
      raise errors.OpPrereqError("There are no possible target groups",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    assert self.target_uuids

    env = {
      "TARGET_GROUPS": " ".join(self.target_uuids),
      }

    env.update(BuildInstanceHookEnvByObject(self, self.instance))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()
    return ([mn], [mn])

  def Exec(self, feedback_fn):
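    """Run the iallocator and return the jobs carrying out the group change.

    """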
    instances = list(self.owned_locks(locking.LEVEL_INSTANCE))

    assert instances == [self.op.instance_name], "Instance not locked"

    req = iallocator.IAReqGroupChange(instances=instances,
                                      target_groups=list(self.target_uuids))
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute solution for changing group of"
                                 " instance '%s' using iallocator '%s': %s" %
                                 (self.op.instance_name, self.op.iallocator,
                                  ial.info), errors.ECODE_NORES)

    jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False)

    self.LogInfo("Iallocator returned %s job(s) for changing group of"
                 " instance '%s'", len(jobs), self.op.instance_name)

    return ResultWithJobs(jobs)