#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Logical units dealing with instances."""

import OpenSSL
import copy
import logging
import os

from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import ht
from ganeti import hypervisor
from ganeti import locking
from ganeti.masterd import iallocator
from ganeti import masterd
from ganeti import netutils
from ganeti import objects
from ganeti import pathutils
from ganeti import serializer
import ganeti.rpc.node as rpc
from ganeti import utils

from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs

from ganeti.cmdlib.common import INSTANCE_DOWN, \
  INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, CheckNodeOnline, \
  ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
  LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
  IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
  AnnotateDiskParams, GetUpdatedParams, ExpandInstanceUuidAndName, \
  ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeUuidAndName, \
  CheckDiskTemplateEnabled, IsValidDiskAccessModeCombination
from ganeti.cmdlib.instance_storage import CreateDisks, \
  CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, WaitForSync, \
  IsExclusiveStorageEnabledNodeUuid, CreateSingleBlockDev, ComputeDisks, \
  CheckRADOSFreeSpace, ComputeDiskSizePerVG, GenerateDiskTemplate, \
  StartInstanceDisks, ShutdownInstanceDisks, AssembleInstanceDisks, \
  CheckSpindlesExclusiveStorage
from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
  GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
  NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
  ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
  GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
  CheckInstanceBridgesExist, CheckNicsBridgesExist, CheckNodeHasOS

import ganeti.masterd.instance


#: Type description for changes as returned by L{_ApplyContainerMods}'s
#: callbacks
_TApplyContModsCbChanges = \
  ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
    ht.TNonEmptyString,
    ht.TAny,
    ])))


def _CheckHostnameSane(lu, name):
  """Ensures that a given hostname resolves to a 'sane' name.

  The given name is required to be a prefix of the resolved hostname,
  to prevent accidental mismatches.
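
  For example (illustrative values): requesting the name 'web' while the
  resolver returns 'web.example.com' passes this check, whereas a resolver
  result of 'web2.example.com' would be rejected.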

  @param lu: the logical unit on behalf of which we're checking
  @param name: the name we should resolve and check
  @return: the resolved hostname object

  """
  hostname = netutils.GetHostname(name=name)
  if hostname.name != name:
    lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name)
  if not utils.MatchNameComponent(name, [hostname.name]):
    raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
                                " same as given hostname '%s'") %
                               (hostname.name, name), errors.ECODE_INVAL)
  return hostname


def _CheckOpportunisticLocking(op):
  """Generate error if opportunistic locking is not possible.

  """
  if op.opportunistic_locking and not op.iallocator:
    raise errors.OpPrereqError("Opportunistic locking is only available in"
                               " combination with an instance allocator",
                               errors.ECODE_INVAL)


def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_name_whitelist):
  """Wrapper around IAReqInstanceAlloc.

  @param op: The instance opcode
  @param disks: The computed disks
  @param nics: The computed nics
  @param beparams: The fully filled beparams
  @param node_name_whitelist: List of nodes which should appear as online to the
    allocator (unless the node is already marked offline)

  @returns: A filled L{iallocator.IAReqInstanceAlloc}

  """
  spindle_use = beparams[constants.BE_SPINDLE_USE]
  return iallocator.IAReqInstanceAlloc(name=op.instance_name,
                                       disk_template=op.disk_template,
                                       tags=op.tags,
                                       os=op.os_type,
                                       vcpus=beparams[constants.BE_VCPUS],
                                       memory=beparams[constants.BE_MAXMEM],
                                       spindle_use=spindle_use,
                                       disks=disks,
                                       nics=[n.ToDict() for n in nics],
                                       hypervisor=op.hypervisor,
                                       node_whitelist=node_name_whitelist)


def _ComputeFullBeParams(op, cluster):
  """Computes the full beparams.

  @param op: The instance opcode
  @param cluster: The cluster config object

  @return: The fully filled beparams
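
  A value of C{constants.VALUE_AUTO} in the opcode's beparams (for example an
  illustrative {"vcpus": "auto"}) is replaced by the cluster default for that
  parameter before the remaining cluster defaults are filled in.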

  """
  default_beparams = cluster.beparams[constants.PP_DEFAULT]
  for param, value in op.beparams.iteritems():
    if value == constants.VALUE_AUTO:
      op.beparams[param] = default_beparams[param]
  objects.UpgradeBeParams(op.beparams)
  utils.ForceDictType(op.beparams, constants.BES_PARAMETER_TYPES)
  return cluster.SimpleFillBE(op.beparams)


def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
  """Computes the nics.

  @param op: The instance opcode
  @param cluster: Cluster configuration object
  @param default_ip: The default ip to assign
  @param cfg: An instance of the configuration object
  @param ec_id: Execution context ID

  @returns: The list of built L{objects.NIC} objects
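
  For example (illustrative values): a NIC passed as
  {"ip": "pool", "network": "net1"} defers IP assignment to that network's
  address pool, while an "ip" of "auto" uses C{default_ip} and therefore
  requires name checks to be enabled.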

  """
  nics = []
  for nic in op.nics:
    nic_mode_req = nic.get(constants.INIC_MODE, None)
    nic_mode = nic_mode_req
    if nic_mode is None or nic_mode == constants.VALUE_AUTO:
      nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]

    net = nic.get(constants.INIC_NETWORK, None)
    link = nic.get(constants.NIC_LINK, None)
    ip = nic.get(constants.INIC_IP, None)
    vlan = nic.get(constants.INIC_VLAN, None)

    if net is None or net.lower() == constants.VALUE_NONE:
      net = None
    else:
      if nic_mode_req is not None or link is not None:
        raise errors.OpPrereqError("If network is given, no mode or link"
                                   " is allowed to be passed",
                                   errors.ECODE_INVAL)

    # ip validity checks
    if ip is None or ip.lower() == constants.VALUE_NONE:
      nic_ip = None
    elif ip.lower() == constants.VALUE_AUTO:
      if not op.name_check:
        raise errors.OpPrereqError("IP address set to auto but name checks"
                                   " have been skipped",
                                   errors.ECODE_INVAL)
      nic_ip = default_ip
    else:
      # We defer pool operations until later, so that the iallocator has
      # filled in the instance's node(s)
      if ip.lower() == constants.NIC_IP_POOL:
        if net is None:
          raise errors.OpPrereqError("if ip=pool, parameter network"
                                     " must be passed too",
                                     errors.ECODE_INVAL)

      elif not netutils.IPAddress.IsValid(ip):
        raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                   errors.ECODE_INVAL)

      nic_ip = ip

    # TODO: check the ip address for uniqueness
    if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
      raise errors.OpPrereqError("Routed nic mode requires an ip address",
                                 errors.ECODE_INVAL)

    # MAC address verification
    mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
    if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
      mac = utils.NormalizeAndValidateMac(mac)

      try:
        # TODO: We need to factor this out
        cfg.ReserveMAC(mac, ec_id)
      except errors.ReservationError:
        raise errors.OpPrereqError("MAC address %s already in use"
                                   " in cluster" % mac,
                                   errors.ECODE_NOTUNIQUE)

    # Build nic parameters
    nicparams = {}
    if nic_mode_req:
      nicparams[constants.NIC_MODE] = nic_mode
    if link:
      nicparams[constants.NIC_LINK] = link
    if vlan:
      nicparams[constants.NIC_VLAN] = vlan

    check_params = cluster.SimpleFillNIC(nicparams)
    objects.NIC.CheckParameterSyntax(check_params)
    net_uuid = cfg.LookupNetwork(net)
    name = nic.get(constants.INIC_NAME, None)
    if name is not None and name.lower() == constants.VALUE_NONE:
      name = None
    nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name,
                          network=net_uuid, nicparams=nicparams)
    nic_obj.uuid = cfg.GenerateUniqueID(ec_id)
    nics.append(nic_obj)

  return nics


def _CheckForConflictingIp(lu, ip, node_uuid):
  """In case of conflicting IP address raise error.

  @type ip: string
  @param ip: IP address
  @type node_uuid: string
  @param node_uuid: node UUID

  """
  (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node_uuid)
  if conf_net is not None:
    raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
                                " network %s, but the target NIC does not." %
                                (ip, conf_net)),
                               errors.ECODE_STATE)

  return (None, None)


def _ComputeIPolicyInstanceSpecViolation(
  ipolicy, instance_spec, disk_template,
  _compute_fn=ComputeIPolicySpecViolation):
  """Compute if instance specs meets the specs of ipolicy.
275

276
  @type ipolicy: dict
277
  @param ipolicy: The ipolicy to verify against
278
  @param instance_spec: dict
279
  @param instance_spec: The instance spec to verify
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{ComputeIPolicySpecViolation}

  """
  mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
  cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
  disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
  disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
  nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
  spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)

  return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
                     disk_sizes, spindle_use, disk_template)


def _CheckOSVariant(os_obj, name):
  """Check whether an OS name conforms to the os variants specification.

  @type os_obj: L{objects.OS}
  @param os_obj: OS object to check
  @type name: string
  @param name: OS name passed by the user, to check for validity
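
  For example (illustrative name): for an OS that declares supported variants,
  a name such as "debootstrap+default" selects the "default" variant, while
  passing just "debootstrap" is rejected because no variant is included.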

  """
  variant = objects.OS.GetVariant(name)
  if not os_obj.supported_variants:
    if variant:
      raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
                                 " passed)" % (os_obj.name, variant),
                                 errors.ECODE_INVAL)
    return
  if not variant:
    raise errors.OpPrereqError("OS name must include a variant",
                               errors.ECODE_INVAL)

  if variant not in os_obj.supported_variants:
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)


class LUInstanceCreate(LogicalUnit):
  """Create an instance.

  """
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def _CheckDiskTemplateValid(self):
    """Checks validity of disk template.

    """
    cluster = self.cfg.GetClusterInfo()
    if self.op.disk_template is None:
      # FIXME: It would be better to take the default disk template from the
      # ipolicy, but for the ipolicy we need the primary node, which we get from
      # the iallocator, which wants the disk template as input. To solve this
      # chicken-and-egg problem, it should be possible to specify just a node
      # group from the iallocator and take the ipolicy from that.
      self.op.disk_template = cluster.enabled_disk_templates[0]
    CheckDiskTemplateEnabled(cluster, self.op.disk_template)

  def _CheckDiskArguments(self):
    """Checks validity of disk-related arguments.

    """
    # check that disks' names are unique and valid
    utils.ValidateDeviceNames("disk", self.op.disks)

    self._CheckDiskTemplateValid()

    # check disks' parameter names and consistent adopt/no-adopt strategy
    has_adopt = has_no_adopt = False
    for disk in self.op.disks:
      if self.op.disk_template != constants.DT_EXT:
        utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
      if constants.IDISK_ADOPT in disk:
        has_adopt = True
      else:
        has_no_adopt = True
    if has_adopt and has_no_adopt:
      raise errors.OpPrereqError("Either all disks are adopted or none is",
                                 errors.ECODE_INVAL)
    if has_adopt:
      if self.op.disk_template not in constants.DTS_MAY_ADOPT:
        raise errors.OpPrereqError("Disk adoption is not supported for the"
                                   " '%s' disk template" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)
      if self.op.iallocator is not None:
        raise errors.OpPrereqError("Disk adoption not allowed with an"
                                   " iallocator script", errors.ECODE_INVAL)
      if self.op.mode == constants.INSTANCE_IMPORT:
        raise errors.OpPrereqError("Disk adoption not allowed for"
                                   " instance import", errors.ECODE_INVAL)
    else:
      if self.op.disk_template in constants.DTS_MUST_ADOPT:
        raise errors.OpPrereqError("Disk template %s requires disk adoption,"
                                   " but no 'adopt' parameter given" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)

    self.adopt_disks = has_adopt

  def _CheckVLANArguments(self):
    """Check validity of VLANs if given.

    """
    for nic in self.op.nics:
      vlan = nic.get(constants.INIC_VLAN, None)
      if vlan:
        if vlan[0] == ".":
          # vlan starting with dot means single untagged vlan,
          # might be followed by trunk (:)
          if not vlan[1:].isdigit():
            vlanlist = vlan[1:].split(':')
            for vl in vlanlist:
              if not vl.isdigit():
                raise errors.OpPrereqError("Specified VLAN parameter is "
                                           "invalid : %s" % vlan,
                                           errors.ECODE_INVAL)
        elif vlan[0] == ":":
          # Trunk - tagged only
          vlanlist = vlan[1:].split(':')
          for vl in vlanlist:
            if not vl.isdigit():
              raise errors.OpPrereqError("Specified VLAN parameter is invalid"
                                         " : %s" % vlan, errors.ECODE_INVAL)
        elif vlan.isdigit():
          # This is the simplest case. No dots, only single digit
          # -> Create untagged access port, dot needs to be added
          nic[constants.INIC_VLAN] = "." + vlan
        else:
          raise errors.OpPrereqError("Specified VLAN parameter is invalid"
                                     " : %s" % vlan, errors.ECODE_INVAL)

  def CheckArguments(self):
    """Check arguments.

    """
    # do not require name_check to ease forward/backward compatibility
    # for tools
    if self.op.no_install and self.op.start:
      self.LogInfo("No-installation mode selected, disabling startup")
      self.op.start = False
    # validate/normalize the instance name
    self.op.instance_name = \
      netutils.Hostname.GetNormalizedName(self.op.instance_name)

    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("Cannot do IP address check without a name"
                                 " check", errors.ECODE_INVAL)

    # add NIC for instance communication
    if self.op.instance_communication:
      nic_name = "%s%s" % (constants.INSTANCE_COMMUNICATION_NIC_PREFIX,
                           self.op.instance_name)

      self.op.nics.append({constants.INIC_NAME: nic_name,
                           constants.INIC_MAC: constants.VALUE_GENERATE,
                           constants.INIC_IP: constants.NIC_IP_POOL,
                           constants.INIC_NETWORK:
                             self.cfg.GetInstanceCommunicationNetwork()})

    # check nics' parameter names
    for nic in self.op.nics:
      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
    # check that NICs' parameter names are unique and valid
    utils.ValidateDeviceNames("NIC", self.op.nics)

    self._CheckVLANArguments()

    self._CheckDiskArguments()
    assert self.op.disk_template is not None

    # instance name verification
    if self.op.name_check:
      self.hostname = _CheckHostnameSane(self, self.op.instance_name)
      self.op.instance_name = self.hostname.name
      # used in CheckPrereq for ip ping check
      self.check_ip = self.hostname.ip
    else:
      self.check_ip = None

    # file storage checks
    if (self.op.file_driver and
        not self.op.file_driver in constants.FILE_DRIVER):
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                 self.op.file_driver, errors.ECODE_INVAL)

    # set default file_driver if unset and required
    if (not self.op.file_driver and
        self.op.disk_template in constants.DTS_FILEBASED):
      self.op.file_driver = constants.FD_LOOP

    ### Node/iallocator related checks
    CheckIAllocatorOrNode(self, "iallocator", "pnode")

    if self.op.pnode is not None:
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        if self.op.snode is None:
          raise errors.OpPrereqError("The networked disk templates need"
                                     " a mirror node", errors.ECODE_INVAL)
      elif self.op.snode:
        self.LogWarning("Secondary node will be ignored on non-mirrored disk"
                        " template")
        self.op.snode = None

    _CheckOpportunisticLocking(self.op)

    if self.op.mode == constants.INSTANCE_IMPORT:
      # On import force_variant must be True, because if we forced it at
      # initial install, our only chance when importing it back is that it
      # works again!
      self.op.force_variant = True

      if self.op.no_install:
        self.LogInfo("No-installation mode has no effect during import")

    elif self.op.mode == constants.INSTANCE_CREATE:
      if self.op.os_type is None:
        raise errors.OpPrereqError("No guest OS specified",
                                   errors.ECODE_INVAL)
      if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
        raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
                                   " installation" % self.op.os_type,
                                   errors.ECODE_STATE)
    elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
      self._cds = GetClusterDomainSecret()

      # Check handshake to ensure both clusters have the same domain secret
      src_handshake = self.op.source_handshake
      if not src_handshake:
        raise errors.OpPrereqError("Missing source handshake",
                                   errors.ECODE_INVAL)

      errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
                                                           src_handshake)
      if errmsg:
        raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
                                   errors.ECODE_INVAL)

      # Load and check source CA
      self.source_x509_ca_pem = self.op.source_x509_ca
      if not self.source_x509_ca_pem:
        raise errors.OpPrereqError("Missing source X509 CA",
                                   errors.ECODE_INVAL)

      try:
        (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
                                                    self._cds)
      except OpenSSL.crypto.Error, err:
        raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
                                   (err, ), errors.ECODE_INVAL)

      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
      if errcode is not None:
        raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
                                   errors.ECODE_INVAL)

      self.source_x509_ca = cert

      src_instance_name = self.op.source_instance_name
      if not src_instance_name:
        raise errors.OpPrereqError("Missing source instance name",
                                   errors.ECODE_INVAL)

      self.source_instance_name = \
        netutils.GetHostname(name=src_instance_name).name

    else:
      raise errors.OpPrereqError("Invalid instance creation mode %r" %
                                 self.op.mode, errors.ECODE_INVAL)

  def ExpandNames(self):
    """ExpandNames for CreateInstance.

    Figure out the right locks for instance creation.

    """
    self.needed_locks = {}

    # this is just a preventive check, but someone might still add this
    # instance in the meantime, and creation will fail at lock-add time
    if self.op.instance_name in \
      [inst.name for inst in self.cfg.GetAllInstancesInfo().values()]:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 self.op.instance_name, errors.ECODE_EXISTS)

    self.add_locks[locking.LEVEL_INSTANCE] = self.op.instance_name

    if self.op.iallocator:
      # TODO: Find a solution to not lock all nodes in the cluster, e.g. by
      # specifying a group on instance creation and then selecting nodes from
      # that group
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
      self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET

      if self.op.opportunistic_locking:
        self.opportunistic_locks[locking.LEVEL_NODE] = True
    else:
      (self.op.pnode_uuid, self.op.pnode) = \
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
      nodelist = [self.op.pnode_uuid]
      if self.op.snode is not None:
        (self.op.snode_uuid, self.op.snode) = \
          ExpandNodeUuidAndName(self.cfg, self.op.snode_uuid, self.op.snode)
        nodelist.append(self.op.snode_uuid)
      self.needed_locks[locking.LEVEL_NODE] = nodelist

    # in case of import lock the source node too
    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = self.op.src_node
      src_path = self.op.src_path

      if src_path is None:
        self.op.src_path = src_path = self.op.instance_name

      if src_node is None:
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
        self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
        self.op.src_node = None
        if os.path.isabs(src_path):
          raise errors.OpPrereqError("Importing an instance from a path"
                                     " requires a source node option",
                                     errors.ECODE_INVAL)
      else:
        (self.op.src_node_uuid, self.op.src_node) = (_, src_node) = \
          ExpandNodeUuidAndName(self.cfg, self.op.src_node_uuid, src_node)
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
          self.needed_locks[locking.LEVEL_NODE].append(self.op.src_node_uuid)
        if not os.path.isabs(src_path):
          self.op.src_path = \
            utils.PathJoin(pathutils.EXPORT_DIR, src_path)

    self.needed_locks[locking.LEVEL_NODE_RES] = \
      CopyLockList(self.needed_locks[locking.LEVEL_NODE])

    # Optimistically acquire shared group locks (we're reading the
    # configuration).  We can't just call GetInstanceNodeGroups, because the
    # instance doesn't exist yet. Therefore we lock all node groups of all
    # nodes we have.
    if self.needed_locks[locking.LEVEL_NODE] == locking.ALL_SET:
      # In the case we lock all nodes for opportunistic allocation, we have no
      # choice than to lock all groups, because they're allocated before nodes.
      # This is sad, but true. At least we release all those we don't need in
      # CheckPrereq later.
      self.needed_locks[locking.LEVEL_NODEGROUP] = locking.ALL_SET
    else:
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
        list(self.cfg.GetNodeGroupsFromNodes(
          self.needed_locks[locking.LEVEL_NODE]))
    self.share_locks[locking.LEVEL_NODEGROUP] = 1

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE_RES and \
      self.opportunistic_locks[locking.LEVEL_NODE]:
      # Even when using opportunistic locking, we require the same set of
      # NODE_RES locks as we got NODE locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        self.owned_locks(locking.LEVEL_NODE)

  def _RunAllocator(self):
    """Run the allocator based on input opcode.

    """
    if self.op.opportunistic_locking:
      # Only consider nodes for which a lock is held
      node_name_whitelist = self.cfg.GetNodeNames(
        self.owned_locks(locking.LEVEL_NODE))
    else:
      node_name_whitelist = None

    req = _CreateInstanceAllocRequest(self.op, self.disks,
                                      self.nics, self.be_full,
                                      node_name_whitelist)
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      # When opportunistic locks are used only a temporary failure is generated
      if self.op.opportunistic_locking:
        ecode = errors.ECODE_TEMP_NORES
      else:
        ecode = errors.ECODE_NORES

      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" %
                                 (self.op.iallocator, ial.info),
                                 ecode)

    (self.op.pnode_uuid, self.op.pnode) = \
      ExpandNodeUuidAndName(self.cfg, None, ial.result[0])
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                 self.op.instance_name, self.op.iallocator,
                 utils.CommaJoin(ial.result))

    assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator"

    if req.RequiredNodes() == 2:
      (self.op.snode_uuid, self.op.snode) = \
        ExpandNodeUuidAndName(self.cfg, None, ial.result[1])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "ADD_MODE": self.op.mode,
      }
    if self.op.mode == constants.INSTANCE_IMPORT:
      env["SRC_NODE"] = self.op.src_node
      env["SRC_PATH"] = self.op.src_path
      env["SRC_IMAGES"] = self.src_images

    env.update(BuildInstanceHookEnv(
      name=self.op.instance_name,
      primary_node_name=self.op.pnode,
      secondary_node_names=self.cfg.GetNodeNames(self.secondaries),
      status=self.op.start,
      os_type=self.op.os_type,
      minmem=self.be_full[constants.BE_MINMEM],
      maxmem=self.be_full[constants.BE_MAXMEM],
      vcpus=self.be_full[constants.BE_VCPUS],
      nics=NICListToTuple(self, self.nics),
      disk_template=self.op.disk_template,
      disks=[(d[constants.IDISK_NAME], d.get("uuid", ""),
              d[constants.IDISK_SIZE], d[constants.IDISK_MODE])
             for d in self.disks],
      bep=self.be_full,
      hvp=self.hv_full,
      hypervisor_name=self.op.hypervisor,
      tags=self.op.tags,
      ))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode(), self.op.pnode_uuid] + self.secondaries
    return nl, nl

  def _ReadExportInfo(self):
    """Reads the export information from disk.

    It will override the opcode source node and path with the actual
    information, if these two were not specified before.

    @return: the export information

    """
    assert self.op.mode == constants.INSTANCE_IMPORT

    if self.op.src_node_uuid is None:
      locked_nodes = self.owned_locks(locking.LEVEL_NODE)
      exp_list = self.rpc.call_export_list(locked_nodes)
      found = False
      for node_uuid in exp_list:
        if exp_list[node_uuid].fail_msg:
          continue
        if self.op.src_path in exp_list[node_uuid].payload:
          found = True
          self.op.src_node = self.cfg.GetNodeInfo(node_uuid).name
          self.op.src_node_uuid = node_uuid
          self.op.src_path = utils.PathJoin(pathutils.EXPORT_DIR,
                                            self.op.src_path)
          break
      if not found:
        raise errors.OpPrereqError("No export found for relative path %s" %
                                   self.op.src_path, errors.ECODE_INVAL)

    CheckNodeOnline(self, self.op.src_node_uuid)
    result = self.rpc.call_export_info(self.op.src_node_uuid, self.op.src_path)
    result.Raise("No export or invalid export found in dir %s" %
                 self.op.src_path)

    export_info = objects.SerializableConfigParser.Loads(str(result.payload))
    if not export_info.has_section(constants.INISECT_EXP):
      raise errors.ProgrammerError("Corrupted export config",
                                   errors.ECODE_ENVIRON)

    ei_version = export_info.get(constants.INISECT_EXP, "version")
    if int(ei_version) != constants.EXPORT_VERSION:
      raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                 (ei_version, constants.EXPORT_VERSION),
                                 errors.ECODE_ENVIRON)
    return export_info

  def _ReadExportParams(self, einfo):
    """Use export parameters as defaults.

    In case the opcode doesn't specify (as in override) some instance
    parameters, then try to use them from the export information, if
    that declares them.
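
    For example (illustrative option names): if the opcode specifies no
    disks, the disk sizes are read from the "disk0_size", "disk1_size", ...
    options of the export's instance section, and NIC settings from the
    corresponding "nic0_mac", "nic0_ip", ... options.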

    """
    self.op.os_type = einfo.get(constants.INISECT_EXP, "os")

    if not self.op.disks:
      disks = []
      # TODO: import the disk iv_name too
      for idx in range(constants.MAX_DISKS):
        if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
          disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
          disks.append({constants.IDISK_SIZE: disk_sz})
      self.op.disks = disks
      if not disks and self.op.disk_template != constants.DT_DISKLESS:
        raise errors.OpPrereqError("No disk info specified and the export"
                                   " is missing the disk information",
                                   errors.ECODE_INVAL)

    if not self.op.nics:
      nics = []
      for idx in range(constants.MAX_NICS):
        if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
          ndict = {}
          for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
            nic_param_name = "nic%d_%s" % (idx, name)
            if einfo.has_option(constants.INISECT_INS, nic_param_name):
              v = einfo.get(constants.INISECT_INS, nic_param_name)
              ndict[name] = v
          nics.append(ndict)
        else:
          break
      self.op.nics = nics

    if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
      self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()

    if (self.op.hypervisor is None and
        einfo.has_option(constants.INISECT_INS, "hypervisor")):
      self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")

    if einfo.has_section(constants.INISECT_HYP):
      # use the export parameters but do not override the ones
      # specified by the user
      for name, value in einfo.items(constants.INISECT_HYP):
        if name not in self.op.hvparams:
          self.op.hvparams[name] = value

    if einfo.has_section(constants.INISECT_BEP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_BEP):
        if name not in self.op.beparams:
          self.op.beparams[name] = value
        # Compatibility for the old "memory" be param
        if name == constants.BE_MEMORY:
          if constants.BE_MAXMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MAXMEM] = value
          if constants.BE_MINMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MINMEM] = value
    else:
      # try to read the parameters old style, from the main section
      for name in constants.BES_PARAMETERS:
        if (name not in self.op.beparams and
            einfo.has_option(constants.INISECT_INS, name)):
          self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)

    if einfo.has_section(constants.INISECT_OSP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_OSP):
        if name not in self.op.osparams:
          self.op.osparams[name] = value

    if einfo.has_section(constants.INISECT_OSP_PRIVATE):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_OSP_PRIVATE):
        if name not in self.op.osparams_private:
          self.op.osparams_private[name] = serializer.Private(value, descr=name)

  def _RevertToDefaults(self, cluster):
    """Revert the instance parameters to the default values.

    """
    # hvparams
    hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
    for name in self.op.hvparams.keys():
      if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
        del self.op.hvparams[name]
    # beparams
    be_defs = cluster.SimpleFillBE({})
    for name in self.op.beparams.keys():
      if name in be_defs and be_defs[name] == self.op.beparams[name]:
        del self.op.beparams[name]
    # nic params
    nic_defs = cluster.SimpleFillNIC({})
    for nic in self.op.nics:
      for name in constants.NICS_PARAMETERS:
        if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
          del nic[name]
    # osparams
    os_defs = cluster.SimpleFillOS(self.op.os_type, {})
    for name in self.op.osparams.keys():
      if name in os_defs and os_defs[name] == self.op.osparams[name]:
        del self.op.osparams[name]

    os_defs_ = cluster.SimpleFillOS(self.op.os_type, {},
                                    os_params_private={})
    for name in self.op.osparams_private.keys():
      if name in os_defs_ and os_defs_[name] == self.op.osparams_private[name]:
        del self.op.osparams_private[name]

  def _CalculateFileStorageDir(self):
    """Calculate final instance file storage dir.

    """
    # file storage dir calculation/check
    self.instance_file_storage_dir = None
    if self.op.disk_template in constants.DTS_FILEBASED:
      # build the full file storage dir path
      joinargs = []

      cfg_storage = None
      if self.op.disk_template == constants.DT_FILE:
        cfg_storage = self.cfg.GetFileStorageDir()
      elif self.op.disk_template == constants.DT_SHARED_FILE:
        cfg_storage = self.cfg.GetSharedFileStorageDir()
      elif self.op.disk_template == constants.DT_GLUSTER:
        cfg_storage = self.cfg.GetGlusterStorageDir()

      if not cfg_storage:
        raise errors.OpPrereqError(
          "Cluster file storage dir for {tpl} storage type not defined".format(
            tpl=repr(self.op.disk_template)
          ),
          errors.ECODE_STATE
        )

      joinargs.append(cfg_storage)

      if self.op.file_storage_dir is not None:
        joinargs.append(self.op.file_storage_dir)

      if self.op.disk_template != constants.DT_GLUSTER:
        joinargs.append(self.op.instance_name)

      if len(joinargs) > 1:
        # pylint: disable=W0142
        self.instance_file_storage_dir = utils.PathJoin(*joinargs)
      else:
        self.instance_file_storage_dir = joinargs[0]

  def CheckPrereq(self): # pylint: disable=R0914
    """Check prerequisites.

    """
    # Check that the optimistically acquired groups are correct wrt the
    # acquired nodes
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
    cur_groups = list(self.cfg.GetNodeGroupsFromNodes(owned_nodes))
    if not owned_groups.issuperset(cur_groups):
      raise errors.OpPrereqError("New instance %s's node groups changed since"
                                 " locks were acquired, current groups are"
941
                                 " are '%s', owning groups '%s'; retry the"
                                 " operation" %
                                 (self.op.instance_name,
                                  utils.CommaJoin(cur_groups),
                                  utils.CommaJoin(owned_groups)),
                                 errors.ECODE_STATE)

    self._CalculateFileStorageDir()

    if self.op.mode == constants.INSTANCE_IMPORT:
      export_info = self._ReadExportInfo()
      self._ReadExportParams(export_info)
      self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
    else:
      self._old_instance_name = None

    if (not self.cfg.GetVGName() and
        self.op.disk_template not in constants.DTS_NOT_LVM):
      raise errors.OpPrereqError("Cluster does not support lvm-based"
                                 " instances", errors.ECODE_STATE)

    if (self.op.hypervisor is None or
        self.op.hypervisor == constants.VALUE_AUTO):
      self.op.hypervisor = self.cfg.GetHypervisorType()

    cluster = self.cfg.GetClusterInfo()
    enabled_hvs = cluster.enabled_hypervisors
    if self.op.hypervisor not in enabled_hvs:
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
                                 " cluster (%s)" %
                                 (self.op.hypervisor, ",".join(enabled_hvs)),
                                 errors.ECODE_STATE)

    # Check tag validity
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)

    # check hypervisor parameter syntax (locally)
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
    filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
                                      self.op.hvparams)
    hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
    hv_type.CheckParameterSyntax(filled_hvp)
    self.hv_full = filled_hvp
    # check that we don't specify global parameters on an instance
    CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
                         "instance", "cluster")

    # fill and remember the beparams dict
    self.be_full = _ComputeFullBeParams(self.op, cluster)

    # build os parameters
    if self.op.osparams_private is None:
      self.op.osparams_private = serializer.PrivateDict()
    if self.op.osparams_secret is None:
      self.op.osparams_secret = serializer.PrivateDict()

    self.os_full = cluster.SimpleFillOS(
      self.op.os_type,
      self.op.osparams,
      os_params_private=self.op.osparams_private,
      os_params_secret=self.op.osparams_secret
    )

    # now that hvp/bep are in final format, let's reset to defaults,
    # if told to do so
    if self.op.identify_defaults:
      self._RevertToDefaults(cluster)

    # NIC buildup
    self.nics = _ComputeNics(self.op, cluster, self.check_ip, self.cfg,
                             self.proc.GetECId())

    # disk checks/pre-build
    default_vg = self.cfg.GetVGName()
    self.disks = ComputeDisks(self.op, default_vg)

    if self.op.mode == constants.INSTANCE_IMPORT:
      disk_images = []
      for idx in range(len(self.disks)):
        option = "disk%d_dump" % idx
        if export_info.has_option(constants.INISECT_INS, option):
          # FIXME: are the old os-es, disk sizes, etc. useful?
          export_name = export_info.get(constants.INISECT_INS, option)
          image = utils.PathJoin(self.op.src_path, export_name)
          disk_images.append(image)
        else:
          disk_images.append(False)

      self.src_images = disk_images

      if self.op.instance_name == self._old_instance_name:
        for idx, nic in enumerate(self.nics):
          if nic.mac == constants.VALUE_AUTO:
            nic_mac_ini = "nic%d_mac" % idx
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)

    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT

    # ip ping checks (we use the same ip that was resolved in ExpandNames)
    if self.op.ip_check:
      if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (self.check_ip, self.op.instance_name),
                                   errors.ECODE_NOTUNIQUE)

    #### mac address generation
    # By generating here the mac address both the allocator and the hooks get
    # the real final mac address rather than the 'auto' or 'generate' value.
    # There is a race condition between the generation and the instance object
    # creation, which means that we know the mac is valid now, but we're not
    # sure it will be when we actually add the instance. If things go bad
    # adding the instance will abort because of a duplicate mac, and the
    # creation job will fail.
    for nic in self.nics:
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId())

    #### allocator run

    if self.op.iallocator is not None:
      self._RunAllocator()

    # Release all unneeded node locks
    keep_locks = filter(None, [self.op.pnode_uuid, self.op.snode_uuid,
                               self.op.src_node_uuid])
    ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)
    # Release all unneeded group locks
    ReleaseLocks(self, locking.LEVEL_NODEGROUP,
                 keep=self.cfg.GetNodeGroupsFromNodes(keep_locks))

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES)), \
      "Node locks differ from node resource locks"

    #### node related checks

    # check primary node
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode_uuid)
    assert self.pnode is not None, \
      "Cannot retrieve locked node %s" % self.op.pnode_uuid
    if pnode.offline:
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if pnode.drained:
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if not pnode.vm_capable:
      raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
                                 " '%s'" % pnode.name, errors.ECODE_STATE)

    self.secondaries = []

    # Fill in any IPs from IP pools. This must happen here, because we need to
    # know the nic's primary node, as specified by the iallocator
    for idx, nic in enumerate(self.nics):
      net_uuid = nic.network
      if net_uuid is not None:
        nobj = self.cfg.GetNetwork(net_uuid)
        netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.uuid)
        if netparams is None:
          raise errors.OpPrereqError("No netparams found for network"
                                     " %s. Probably not connected to"
                                     " node's %s nodegroup" %
                                     (nobj.name, self.pnode.name),
                                     errors.ECODE_INVAL)
        self.LogInfo("NIC/%d inherits netparams %s" %
                     (idx, netparams.values()))
        nic.nicparams = dict(netparams)
        if nic.ip is not None:
          if nic.ip.lower() == constants.NIC_IP_POOL:
            try:
              nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
                                         " from the address pool" % idx,
                                         errors.ECODE_STATE)
            self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name)
          else:
            try:
              self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId(),
                                 check=self.op.conflicts_check)
            except errors.ReservationError:
              raise errors.OpPrereqError("IP address %s already in use"
                                         " or does not belong to network %s" %
                                         (nic.ip, nobj.name),
                                         errors.ECODE_NOTUNIQUE)

      # net is None, ip None or given
      elif self.op.conflicts_check:
        _CheckForConflictingIp(self, nic.ip, self.pnode.uuid)

    # mirror node verification
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.snode_uuid == pnode.uuid:
        raise errors.OpPrereqError("The secondary node cannot be the"
                                   " primary node", errors.ECODE_INVAL)
      CheckNodeOnline(self, self.op.snode_uuid)
      CheckNodeNotDrained(self, self.op.snode_uuid)
      CheckNodeVmCapable(self, self.op.snode_uuid)
      self.secondaries.append(self.op.snode_uuid)

      snode = self.cfg.GetNodeInfo(self.op.snode_uuid)
      if pnode.group != snode.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    nodes = [pnode]
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      nodes.append(snode)
    has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
    excl_stor = compat.any(map(has_es, nodes))
    if excl_stor and not self.op.disk_template in constants.DTS_EXCL_STORAGE:
      raise errors.OpPrereqError("Disk template %s not supported with"
                                 " exclusive storage" % self.op.disk_template,
                                 errors.ECODE_STATE)
    for disk in self.disks:
      CheckSpindlesExclusiveStorage(disk, excl_stor, True)

    node_uuids = [pnode.uuid] + self.secondaries

    if not self.adopt_disks:
      if self.op.disk_template == constants.DT_RBD:
        # _CheckRADOSFreeSpace() is just a placeholder.
        # Any function that checks prerequisites can be placed here.
        # Check if there is enough space on the RADOS cluster.
        CheckRADOSFreeSpace()
      elif self.op.disk_template == constants.DT_EXT:
        # FIXME: Function that checks prereqs if needed
        pass
      elif self.op.disk_template in constants.DTS_LVM:
        # Check lv size requirements, if not adopting
        req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
        CheckNodesFreeDiskPerVG(self, node_uuids, req_sizes)
      else:
        # FIXME: add checks for other, non-adopting, non-lvm disk templates
        pass

    elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
      all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
                                disk[constants.IDISK_ADOPT])
                     for disk in self.disks])
      if len(all_lvs) != len(self.disks):
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
                                   errors.ECODE_INVAL)
      for lv_name in all_lvs:
        try:
          # FIXME: lv_name here is "vg/lv"; need to ensure that other calls
          # to ReserveLV use the same syntax
1194
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
1195
        except errors.ReservationError:
1196
          raise errors.OpPrereqError("LV named %s used by another instance" %
1197
                                     lv_name, errors.ECODE_NOTUNIQUE)
1198

    
1199
      vg_names = self.rpc.call_vg_list([pnode.uuid])[pnode.uuid]
1200
      vg_names.Raise("Cannot get VG information from node %s" % pnode.name)
1201

    
1202
      node_lvs = self.rpc.call_lv_list([pnode.uuid],
1203
                                       vg_names.payload.keys())[pnode.uuid]
1204
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
1205
      node_lvs = node_lvs.payload
1206

    
1207
      delta = all_lvs.difference(node_lvs.keys())
1208
      if delta:
1209
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
1210
                                   utils.CommaJoin(delta),
1211
                                   errors.ECODE_INVAL)
1212
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
1213
      if online_lvs:
1214
        raise errors.OpPrereqError("Online logical volumes found, cannot"
1215
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
1216
                                   errors.ECODE_STATE)
1217
      # update the size of disk based on what is found
1218
      for dsk in self.disks:
1219
        dsk[constants.IDISK_SIZE] = \
1220
          int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
1221
                                        dsk[constants.IDISK_ADOPT])][0]))

    elif self.op.disk_template == constants.DT_BLOCK:
      # Normalize and de-duplicate device paths
      all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
                       for disk in self.disks])
      if len(all_disks) != len(self.disks):
        raise errors.OpPrereqError("Duplicate disk names given for adoption",
                                   errors.ECODE_INVAL)
      baddisks = [d for d in all_disks
                  if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
      if baddisks:
        raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
                                   " cannot be adopted" %
                                   (utils.CommaJoin(baddisks),
                                    constants.ADOPTABLE_BLOCKDEV_ROOT),
                                   errors.ECODE_INVAL)

      node_disks = self.rpc.call_bdev_sizes([pnode.uuid],
                                            list(all_disks))[pnode.uuid]
      node_disks.Raise("Cannot get block device information from node %s" %
                       pnode.name)
      node_disks = node_disks.payload
      delta = all_disks.difference(node_disks.keys())
      if delta:
        raise errors.OpPrereqError("Missing block device(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_disks[dsk[constants.IDISK_ADOPT]]))
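
      # Illustrative note (added): call_bdev_sizes is expected to return a
      # mapping of device path to size, e.g. roughly
      #   {"/dev/disk/instance-data": 20480}
      # (the path here is hypothetical), so adopted block devices get their
      # IDISK_SIZE filled in the same way as adopted LVs above.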

    # Check disk access param to be compatible with specified hypervisor
    node_info = self.cfg.GetNodeInfo(self.op.pnode_uuid)
    node_group = self.cfg.GetNodeGroup(node_info.group)
    disk_params = self.cfg.GetGroupDiskParams(node_group)
    access_type = disk_params[self.op.disk_template].get(
      constants.RBD_ACCESS, constants.DISK_KERNELSPACE
    )

    if not IsValidDiskAccessModeCombination(self.op.hypervisor,
                                            self.op.disk_template,
                                            access_type):
      raise errors.OpPrereqError("Selected hypervisor (%s) cannot be"
                                 " used with %s disk access param" %
                                 (self.op.hypervisor, access_type),
                                 errors.ECODE_STATE)

    # Verify instance specs
    spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
    ispec = {
      constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
      constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
      constants.ISPEC_DISK_COUNT: len(self.disks),
      constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE]
                                  for disk in self.disks],
      constants.ISPEC_NIC_COUNT: len(self.nics),
      constants.ISPEC_SPINDLE_USE: spindle_use,
      }
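
    # Illustrative sketch (added, not original code): for an instance with
    # 2 GiB of memory, 2 VCPUs and a single 10 GiB disk, the spec dict built
    # above would look roughly like
    #   {constants.ISPEC_MEM_SIZE: 2048,
    #    constants.ISPEC_CPU_COUNT: 2,
    #    constants.ISPEC_DISK_COUNT: 1,
    #    constants.ISPEC_DISK_SIZE: [10240],
    #    constants.ISPEC_NIC_COUNT: 1,
    #    constants.ISPEC_SPINDLE_USE: 1}
    # and is matched against the node group's instance policy below.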

    group_info = self.cfg.GetNodeGroup(pnode.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
    res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
                                               self.op.disk_template)
    if not self.op.ignore_ipolicy and res:
      msg = ("Instance allocation to group %s (%s) violates policy: %s" %
             (pnode.group, group_info.name, utils.CommaJoin(res)))
      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

    CheckHVParams(self, node_uuids, self.op.hypervisor, self.op.hvparams)

    CheckNodeHasOS(self, pnode.uuid, self.op.os_type, self.op.force_variant)
    # check OS parameters (remotely)
    CheckOSParams(self, True, node_uuids, self.op.os_type, self.os_full)

    CheckNicsBridgesExist(self, self.nics, self.pnode.uuid)

    #TODO: _CheckExtParams (remotely)
    # Check parameters for extstorage

    # memory check on primary node
    #TODO(dynmem): use MINMEM for checking
    if self.op.start:
      hvfull = objects.FillDict(cluster.hvparams.get(self.op.hypervisor, {}),
                                self.op.hvparams)
      CheckNodeFreeMemory(self, self.pnode.uuid,
                          "creating instance %s" % self.op.instance_name,
                          self.be_full[constants.BE_MAXMEM],
                          self.op.hypervisor, hvfull)

    self.dry_run_result = list(node_uuids)

  def Exec(self, feedback_fn):
    """Create and add the instance to the cluster.

    """
    assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
                self.owned_locks(locking.LEVEL_NODE)), \
      "Node locks differ from node resource locks"
    assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)

    ht_kind = self.op.hypervisor
    if ht_kind in constants.HTS_REQ_PORT:
      network_port = self.cfg.AllocatePort()
    else:
      network_port = None

    instance_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())

    # This is ugly but we got a chicken-egg problem here
    # We can only take the group disk parameters, as the instance
    # has no disks yet (we are generating them right here).
    nodegroup = self.cfg.GetNodeGroup(self.pnode.group)
    disks = GenerateDiskTemplate(self,
                                 self.op.disk_template,
                                 instance_uuid, self.pnode.uuid,
                                 self.secondaries,
                                 self.disks,
                                 self.instance_file_storage_dir,
                                 self.op.file_driver,
                                 0,
                                 feedback_fn,
                                 self.cfg.GetGroupDiskParams(nodegroup))

    iobj = objects.Instance(name=self.op.instance_name,
                            uuid=instance_uuid,
                            os=self.op.os_type,
                            primary_node=self.pnode.uuid,
                            nics=self.nics, disks=disks,
                            disk_template=self.op.disk_template,
                            disks_active=False,
                            admin_state=constants.ADMINST_DOWN,
                            network_port=network_port,
                            beparams=self.op.beparams,
                            hvparams=self.op.hvparams,
                            hypervisor=self.op.hypervisor,
                            osparams=self.op.osparams,
                            osparams_private=self.op.osparams_private,
                            )

    if self.op.tags:
      for tag in self.op.tags:
        iobj.AddTag(tag)

    if self.adopt_disks:
      if self.op.disk_template == constants.DT_PLAIN:
        # rename LVs to the newly-generated names; we need to construct
        # 'fake' LV disks with the old data, plus the new unique_id
        tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
        rename_to = []
        for t_dsk, a_dsk in zip(tmp_disks, self.disks):
          rename_to.append(t_dsk.logical_id)
          t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
        result = self.rpc.call_blockdev_rename(self.pnode.uuid,
                                               zip(tmp_disks, rename_to))
        result.Raise("Failed to rename adopted LVs")
    else:
      feedback_fn("* creating instance disks...")
      try:
        CreateDisks(self, iobj)
      except errors.OpExecError:
        self.LogWarning("Device creation failed")
        self.cfg.ReleaseDRBDMinors(self.op.instance_name)
        raise

    feedback_fn("adding instance %s to cluster config" % self.op.instance_name)

    self.cfg.AddInstance(iobj, self.proc.GetECId())

    # Declare that we don't want to remove the instance lock anymore, as we've
    # added the instance to the config
    del self.remove_locks[locking.LEVEL_INSTANCE]

    if self.op.mode == constants.INSTANCE_IMPORT:
      # Release unused nodes
      ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node_uuid])
    else:
      # Release all nodes
      ReleaseLocks(self, locking.LEVEL_NODE)

    disk_abort = False
    if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
      feedback_fn("* wiping instance disks...")
      try:
        WipeDisks(self, iobj)
      except errors.OpExecError, err:
        logging.exception("Wiping disks failed")
        self.LogWarning("Wiping instance disks failed (%s)", err)
        disk_abort = True

    if disk_abort:
      # Something is already wrong with the disks, don't do anything else
      pass
    elif self.op.wait_for_sync:
      disk_abort = not WaitForSync(self, iobj)
    elif iobj.disk_template in constants.DTS_INT_MIRROR:
      # make sure the disks are not degraded (still sync-ing is ok)
      feedback_fn("* checking mirrors status")
      disk_abort = not WaitForSync(self, iobj, oneshot=True)
    else:
      disk_abort = False

    if disk_abort:
      RemoveDisks(self, iobj)
      self.cfg.RemoveInstance(iobj.uuid)
      # Make sure the instance lock gets removed
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance")

    # instance disks are now active
    iobj.disks_active = True

    # Release all node resource locks
    ReleaseLocks(self, locking.LEVEL_NODE_RES)

    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
      if self.op.mode == constants.INSTANCE_CREATE:
        if not self.op.no_install:
          pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
                        not self.op.wait_for_sync)
          if pause_sync:
            feedback_fn("* pausing disk sync to install instance OS")
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
                                                              (iobj.disks,
                                                               iobj), True)
            for idx, success in enumerate(result.payload):
              if not success:
                logging.warn("pause-sync of instance %s for disk %d failed",
                             self.op.instance_name, idx)

          feedback_fn("* running the instance OS create scripts...")
          # FIXME: pass debug option from opcode to backend
          os_add_result = \
            self.rpc.call_instance_os_add(self.pnode.uuid,
                                          (iobj, self.op.osparams_secret),
                                          False,
                                          self.op.debug_level)
          if pause_sync:
            feedback_fn("* resuming disk sync")
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
                                                              (iobj.disks,
                                                               iobj), False)
            for idx, success in enumerate(result.payload):
              if not success:
                logging.warn("resume-sync of instance %s for disk %d failed",
                             self.op.instance_name, idx)

          os_add_result.Raise("Could not add os for instance %s"
                              " on node %s" % (self.op.instance_name,
                                               self.pnode.name))

      else:
        if self.op.mode == constants.INSTANCE_IMPORT:
          feedback_fn("* running the instance OS import scripts...")

          transfers = []

          for idx, image in enumerate(self.src_images):
            if not image:
              continue

            # FIXME: pass debug option from opcode to backend
            dt = masterd.instance.DiskTransfer("disk/%s" % idx,
                                               constants.IEIO_FILE, (image, ),
                                               constants.IEIO_SCRIPT,
                                               ((iobj.disks[idx], iobj), idx),
                                               None)
            transfers.append(dt)

          import_result = \
            masterd.instance.TransferInstanceData(self, feedback_fn,
                                                  self.op.src_node_uuid,
                                                  self.pnode.uuid,
                                                  self.pnode.secondary_ip,
                                                  self.op.compress,
                                                  iobj, transfers)
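
          # Explanatory note (added as an illustration, not original code):
          # each DiskTransfer above describes one copy job from an exported
          # dump file on the source node (IEIO_FILE) into the import script
          # for the matching new disk (IEIO_SCRIPT); TransferInstanceData runs
          # the jobs and returns one boolean per disk, which is why a partial
          # failure only triggers the warning below instead of aborting the
          # whole creation.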
          if not compat.all(import_result):
            self.LogWarning("Some disks for instance %s on node %s were not"
                            " imported successfully" % (self.op.instance_name,
                                                        self.pnode.name))

          rename_from = self._old_instance_name

        elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
          feedback_fn("* preparing remote import...")
          # The source cluster will stop the instance before attempting to make
          # a connection. In some cases stopping an instance can take a long
          # time, hence the shutdown timeout is added to the connection
          # timeout.
          connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
                             self.op.source_shutdown_timeout)
          timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)

          assert iobj.primary_node == self.pnode.uuid
          disk_results = \
            masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
                                          self.source_x509_ca,
                                          self._cds, self.op.compress, timeouts)
          if not compat.all(disk_results):
            # TODO: Should the instance still be started, even if some disks
            # failed to import (valid for local imports, too)?
            self.LogWarning("Some disks for instance %s on node %s were not"
                            " imported successfully" % (self.op.instance_name,
                                                        self.pnode.name))

          rename_from = self.source_instance_name

        else:
          # also checked in the prereq part
          raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
                                       % self.op.mode)

        # Run rename script on newly imported instance
        assert iobj.name == self.op.instance_name
        feedback_fn("Running rename script for %s" % self.op.instance_name)
        result = self.rpc.call_instance_run_rename(self.pnode.uuid, iobj,
                                                   rename_from,
                                                   self.op.debug_level)
        result.Warn("Failed to run rename script for %s on node %s" %
                    (self.op.instance_name, self.pnode.name), self.LogWarning)

    assert not self.owned_locks(locking.LEVEL_NODE_RES)

    if self.op.start:
      iobj.admin_state = constants.ADMINST_UP
      self.cfg.Update(iobj, feedback_fn)
      logging.info("Starting instance %s on node %s", self.op.instance_name,
                   self.pnode.name)
      feedback_fn("* starting instance...")
      result = self.rpc.call_instance_start(self.pnode.uuid, (iobj, None, None),
                                            False, self.op.reason)
      result.Raise("Could not start instance")

    return self.cfg.GetNodeNames(list(iobj.all_nodes))


class LUInstanceRename(LogicalUnit):
  """Rename an instance.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE

  def CheckArguments(self):
    """Check arguments.

    """
    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("IP address check requires a name check",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = BuildInstanceHookEnvByObject(self, self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    (self.op.instance_uuid, self.op.instance_name) = \
      ExpandInstanceUuidAndName(self.cfg, self.op.instance_uuid,
                                self.op.instance_name)
    instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    assert instance is not None

    # It should actually not happen that an instance is running with a disabled
    # disk template, but in case it does, the renaming of file-based instances
    # will fail horribly. Thus, we test it before.
    if (instance.disk_template in constants.DTS_FILEBASED and
        self.op.new_name != instance.name):
      CheckDiskTemplateEnabled(self.cfg.GetClusterInfo(),
                               instance.disk_template)

    CheckNodeOnline(self, instance.primary_node)
    CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
                       msg="cannot rename")
    self.instance = instance

    new_name = self.op.new_name
    if self.op.name_check:
      hostname = _CheckHostnameSane(self, new_name)
      new_name = self.op.new_name = hostname.name
      if (self.op.ip_check and
          netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (hostname.ip, new_name),
                                   errors.ECODE_NOTUNIQUE)

    instance_names = [inst.name for
                      inst in self.cfg.GetAllInstancesInfo().values()]
    if new_name in instance_names and new_name != instance.name:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 new_name, errors.ECODE_EXISTS)

  def Exec(self, feedback_fn):
    """Rename the instance.

    """
    old_name = self.instance.name

    rename_file_storage = False
    if (self.instance.disk_template in (constants.DT_FILE,
                                        constants.DT_SHARED_FILE) and
        self.op.new_name != self.instance.name):
      old_file_storage_dir = os.path.dirname(
                               self.instance.disks[0].logical_id[1])
      rename_file_storage = True

    self.cfg.RenameInstance(self.instance.uuid, self.op.new_name)
    # Change the instance lock. This is definitely safe while we hold the BGL.
    # Otherwise the new lock would have to be added in acquired mode.
    assert self.REQ_BGL
    assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER)
    self.glm.remove(locking.LEVEL_INSTANCE, old_name)
    self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)

    # re-read the instance from the configuration after rename
    renamed_inst = self.cfg.GetInstanceInfo(self.instance.uuid)

    if rename_file_storage:
      new_file_storage_dir = os.path.dirname(
                               renamed_inst.disks[0].logical_id[1])
      result = self.rpc.call_file_storage_dir_rename(renamed_inst.primary_node,
                                                     old_file_storage_dir,
                                                     new_file_storage_dir)
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
                   " (but the instance has been renamed in Ganeti)" %
                   (self.cfg.GetNodeName(renamed_inst.primary_node),
                    old_file_storage_dir, new_file_storage_dir))
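
      # Example (added; paths are hypothetical): for a file-based instance
      # whose first disk lives under
      #   /srv/ganeti/file-storage/old-name/disk0
      # old_file_storage_dir is the ".../file-storage/old-name" part and
      # new_file_storage_dir the ".../file-storage/new-name" part, and the RPC
      # above moves the whole per-instance directory on the primary node.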

    StartInstanceDisks(self, renamed_inst, None)
    # update info on disks
    info = GetInstanceInfoText(renamed_inst)
    for (idx, disk) in enumerate(renamed_inst.disks):
      for node_uuid in renamed_inst.all_nodes:
        result = self.rpc.call_blockdev_setinfo(node_uuid,
                                                (disk, renamed_inst), info)
        result.Warn("Error setting info on node %s for disk %s" %
                    (self.cfg.GetNodeName(node_uuid), idx), self.LogWarning)
    try:
      result = self.rpc.call_instance_run_rename(renamed_inst.primary_node,
                                                 renamed_inst, old_name,
                                                 self.op.debug_level)
      result.Warn("Could not run OS rename script for instance %s on node %s"
                  " (but the instance has been renamed in Ganeti)" %
                  (renamed_inst.name,
                   self.cfg.GetNodeName(renamed_inst.primary_node)),
                  self.LogWarning)
    finally:
      ShutdownInstanceDisks(self, renamed_inst)

    return renamed_inst.name


class LUInstanceRemove(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()
    elif level == locking.LEVEL_NODE_RES:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = BuildInstanceHookEnvByObject(self, self.instance)
    env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()]
    nl_post = list(self.instance.all_nodes) + nl
    return (nl, nl_post)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Remove the instance.

    """
    logging.info("Shutting down instance %s on node %s", self.instance.name,
                 self.cfg.GetNodeName(self.instance.primary_node))

    result = self.rpc.call_instance_shutdown(self.instance.primary_node,
                                             self.instance,
                                             self.op.shutdown_timeout,
                                             self.op.reason)
    if self.op.ignore_failures:
      result.Warn("Warning: can't shutdown instance", feedback_fn)
    else:
      result.Raise("Could not shutdown instance %s on node %s" %
                   (self.instance.name,
                    self.cfg.GetNodeName(self.instance.primary_node)))

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES))
    assert not (set(self.instance.all_nodes) -
                self.owned_locks(locking.LEVEL_NODE)), \
      "Not owning correct locks"

    RemoveInstance(self, feedback_fn, self.instance, self.op.ignore_failures)


class LUInstanceMove(LogicalUnit):
  """Move an instance by data-copying.

  """
  HPATH = "instance-move"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    (self.op.target_node_uuid, self.op.target_node) = \
      ExpandNodeUuidAndName(self.cfg, self.op.target_node_uuid,
                            self.op.target_node)
    self.needed_locks[locking.LEVEL_NODE] = [self.op.target_node_uuid]
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes(primary_only=True)
    elif level == locking.LEVEL_NODE_RES:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and target nodes of the instance.

    """
    env = {
      "TARGET_NODE": self.op.target_node,
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
      }
    env.update(BuildInstanceHookEnvByObject(self, self.instance))
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [
      self.cfg.GetMasterNode(),
      self.instance.primary_node,
      self.op.target_node_uuid,
      ]
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    if self.instance.disk_template not in constants.DTS_COPYABLE:
      raise errors.OpPrereqError("Disk template %s not suitable for copying" %
                                 self.instance.disk_template,
                                 errors.ECODE_STATE)

    target_node = self.cfg.GetNodeInfo(self.op.target_node_uuid)
    assert target_node is not None, \
      "Cannot retrieve locked node %s" % self.op.target_node

    self.target_node_uuid = target_node.uuid
    if target_node.uuid == self.instance.primary_node:
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
                                 (self.instance.name, target_node.name),
                                 errors.ECODE_STATE)

    cluster = self.cfg.GetClusterInfo()
    bep = cluster.FillBE(self.instance)

    for idx, dsk in enumerate(self.instance.disks):
      if dsk.dev_type not in (constants.DT_PLAIN, constants.DT_FILE,
                              constants.DT_SHARED_FILE, constants.DT_GLUSTER):
        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
                                   " cannot copy" % idx, errors.ECODE_STATE)

    CheckNodeOnline(self, target_node.uuid)
    CheckNodeNotDrained(self, target_node.uuid)
    CheckNodeVmCapable(self, target_node.uuid)
    group_info = self.cfg.GetNodeGroup(target_node.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
    CheckTargetNodeIPolicy(self, ipolicy, self.instance, target_node, self.cfg,
                           ignore=self.op.ignore_ipolicy)

    if self.instance.admin_state == constants.ADMINST_UP:
      # check memory requirements on the target node
      CheckNodeFreeMemory(
          self, target_node.uuid, "failing over instance %s" %
          self.instance.name, bep[constants.BE_MAXMEM],
          self.instance.hypervisor,
          cluster.hvparams[self.instance.hypervisor])
    else:
      self.LogInfo("Not checking memory on the secondary node as"
                   " instance will not be started")

    # check bridge existence
    CheckInstanceBridgesExist(self, self.instance, node_uuid=target_node.uuid)

  def Exec(self, feedback_fn):
    """Move an instance.

    The move is done by shutting it down on its present node, copying
    the data over (slow) and starting it on the new node.

    """
    source_node = self.cfg.GetNodeInfo(self.instance.primary_node)
    target_node = self.cfg.GetNodeInfo(self.target_node_uuid)

    self.LogInfo("Shutting down instance %s on source node %s",
                 self.instance.name, source_node.name)

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES))

    result = self.rpc.call_instance_shutdown(source_node.uuid, self.instance,
                                             self.op.shutdown_timeout,
                                             self.op.reason)
    if self.op.ignore_consistency:
      result.Warn("Could not shutdown instance %s on node %s. Proceeding"
                  " anyway. Please make sure node %s is down. Error details" %
                  (self.instance.name, source_node.name, source_node.name),
                  self.LogWarning)
    else:
      result.Raise("Could not shutdown instance %s on node %s" %
                   (self.instance.name, source_node.name))

    # create the target disks
    try:
      CreateDisks(self, self.instance, target_node_uuid=target_node.uuid)
    except errors.OpExecError:
      self.LogWarning("Device creation failed")
      self.cfg.ReleaseDRBDMinors(self.instance.uuid)
      raise

    errs = []
    transfers = []
    # activate, get path, create transfer jobs
    for idx, disk in enumerate(self.instance.disks):
      # FIXME: pass debug option from opcode to backend
      dt = masterd.instance.DiskTransfer("disk/%s" % idx,
                                         constants.IEIO_RAW_DISK,
                                         (disk, self.instance),
                                         constants.IEIO_RAW_DISK,
                                         (disk, self.instance),
                                         None)
      transfers.append(dt)

    import_result = \
      masterd.instance.TransferInstanceData(self, feedback_fn,
                                            source_node.uuid,
                                            target_node.uuid,
                                            target_node.secondary_ip,
                                            self.op.compress,
                                            self.instance, transfers)
    if not compat.all(import_result):
      errs.append("Failed to transfer instance data")

    if errs:
      self.LogWarning("Some disks failed to copy, aborting")
      try:
        RemoveDisks(self, self.instance, target_node_uuid=target_node.uuid)
      finally:
        self.cfg.ReleaseDRBDMinors(self.instance.uuid)
        raise errors.OpExecError("Errors during disk copy: %s" %
                                 (",".join(errs),))

    self.instance.primary_node = target_node.uuid
    self.cfg.Update(self.instance, feedback_fn)

    self.LogInfo("Removing the disks on the original node")
    RemoveDisks(self, self.instance, target_node_uuid=source_node.uuid)

    # Only start the instance if it's marked as up
    if self.instance.admin_state == constants.ADMINST_UP:
      self.LogInfo("Starting instance %s on node %s",
                   self.instance.name, target_node.name)

      disks_ok, _ = AssembleInstanceDisks(self, self.instance,
                                          ignore_secondaries=True)
      if not disks_ok:
        ShutdownInstanceDisks(self, self.instance)
        raise errors.OpExecError("Can't activate the instance's disks")

      result = self.rpc.call_instance_start(target_node.uuid,
                                            (self.instance, None, None), False,
                                            self.op.reason)
      msg = result.fail_msg
      if msg:
        ShutdownInstanceDisks(self, self.instance)
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
                                 (self.instance.name, target_node.name, msg))


class LUInstanceMultiAlloc(NoHooksLU):
  """Allocates multiple instances at the same time.

  """
  REQ_BGL = False

  def CheckArguments(self):
    """Check arguments.

    """
    nodes = []
    for inst in self.op.instances:
      if inst.iallocator is not None:
        raise errors.OpPrereqError("iallocator is not allowed to be set on"
                                   " instance objects", errors.ECODE_INVAL)
      nodes.append(bool(inst.pnode))
      if inst.disk_template in constants.DTS_INT_MIRROR:
        nodes.append(bool(inst.snode))

    has_nodes = compat.any(nodes)
    if compat.all(nodes) ^ has_nodes:
      raise errors.OpPrereqError("There are instance objects providing"
                                 " pnode/snode while others do not",
                                 errors.ECODE_INVAL)

    if not has_nodes and self.op.iallocator is None:
      default_iallocator = self.cfg.GetDefaultIAllocator()
      if default_iallocator:
        self.op.iallocator = default_iallocator
      else:
        raise errors.OpPrereqError("No iallocator or nodes on the instances"
                                   " given and no cluster-wide default"
                                   " iallocator found; please specify either"
                                   " an iallocator or nodes on the instances"
                                   " or set a cluster-wide default iallocator",
                                   errors.ECODE_INVAL)

    _CheckOpportunisticLocking(self.op)

    dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
    if dups:
      raise errors.OpPrereqError("There are duplicate instance names: %s" %
                                 utils.CommaJoin(dups), errors.ECODE_INVAL)

  def ExpandNames(self):
    """Calculate the locks.

    """
    self.share_locks = ShareAll()
    self.needed_locks = {
      # iallocator will select nodes and even if no iallocator is used,
      # collisions with LUInstanceCreate should be avoided
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
      }

    if self.op.iallocator:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
      self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET

      if self.op.opportunistic_locking:
        self.opportunistic_locks[locking.LEVEL_NODE] = True
    else:
      nodeslist = []
      for inst in self.op.instances:
        (inst.pnode_uuid, inst.pnode) = \
          ExpandNodeUuidAndName(self.cfg, inst.pnode_uuid, inst.pnode)
        nodeslist.append(inst.pnode_uuid)
        if inst.snode is not None:
          (inst.snode_uuid, inst.snode) = \
            ExpandNodeUuidAndName(self.cfg, inst.snode_uuid, inst.snode)
          nodeslist.append(inst.snode_uuid)

      self.needed_locks[locking.LEVEL_NODE] = nodeslist
      # Lock resources of instance's primary and secondary nodes (copy to
      # prevent accidental modification)
      self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE_RES and \
      self.opportunistic_locks[locking.LEVEL_NODE]:
      # Even when using opportunistic locking, we require the same set of
      # NODE_RES locks as we got NODE locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        self.owned_locks(locking.LEVEL_NODE)

  def CheckPrereq(self):
    """Check prerequisite.

    """
    if self.op.iallocator:
      cluster = self.cfg.GetClusterInfo()
      default_vg = self.cfg.GetVGName()
      ec_id = self.proc.GetECId()

      if self.op.opportunistic_locking:
        # Only consider nodes for which a lock is held
        node_whitelist = self.cfg.GetNodeNames(
                           list(self.owned_locks(locking.LEVEL_NODE)))
      else:
        node_whitelist = None

      insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
                                           _ComputeNics(op, cluster, None,
                                                        self.cfg, ec_id),
                                           _ComputeFullBeParams(op, cluster),
                                           node_whitelist)
               for op in self.op.instances]

      req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
      ial = iallocator.IAllocator(self.cfg, self.rpc, req)

      ial.Run(self.op.iallocator)

      if not ial.success:
        raise errors.OpPrereqError("Can't compute nodes using"
                                   " iallocator '%s': %s" %
                                   (self.op.iallocator, ial.info),
                                   errors.ECODE_NORES)

      self.ia_result = ial.result

    if self.op.dry_run:
      self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
        constants.JOB_IDS_KEY: [],
        })

  def _ConstructPartialResult(self):
    """Constructs the partial result.

    """
    if self.op.iallocator:
      (allocatable, failed_insts) = self.ia_result
      allocatable_insts = map(compat.fst, allocatable)
    else:
      allocatable_insts = [op.instance_name for op in self.op.instances]
      failed_insts = []

    return {
      constants.ALLOCATABLE_KEY: allocatable_insts,
      constants.FAILED_KEY: failed_insts,
      }
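
    # Illustrative example (added; values are hypothetical): with an
    # iallocator result that could place "inst1" but not "inst2", the partial
    # result merged into the job result would look roughly like
    #   {constants.ALLOCATABLE_KEY: ["inst1"],
    #    constants.FAILED_KEY: ["inst2"]}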

  def Exec(self, feedback_fn):
    """Executes the opcode.

    """
    jobs = []
    if self.op.iallocator:
      op2inst = dict((op.instance_name, op) for op in self.op.instances)
      (allocatable, failed) = self.ia_result

      for (name, node_names) in allocatable:
        op = op2inst.pop(name)

        (op.pnode_uuid, op.pnode) = \
          ExpandNodeUuidAndName(self.cfg, None, node_names[0])
        if len(node_names) > 1:
          (op.snode_uuid, op.snode) = \
            ExpandNodeUuidAndName(self.cfg, None, node_names[1])

        jobs.append([op])

      missing = set(op2inst.keys()) - set(failed)
      assert not missing, \
        "Iallocator did return incomplete result: %s" % \
        utils.CommaJoin(missing)
    else:
      jobs.extend([op] for op in self.op.instances)

    return ResultWithJobs(jobs, **self._ConstructPartialResult())


class _InstNicModPrivate:
  """Data structure for network interface modifications.

  Used by L{LUInstanceSetParams}.

  """
  def __init__(self):
    self.params = None
    self.filled = None


def _PrepareContainerMods(mods, private_fn):
  """Prepares a list of container modifications by adding a private data field.

  @type mods: list of tuples; (operation, index, parameters)
  @param mods: List of modifications
  @type private_fn: callable or None
  @param private_fn: Callable for constructing a private data field for a
    modification
  @rtype: list

  """
  if private_fn is None:
    fn = lambda: None
  else:
    fn = private_fn

  return [(op, idx, params, fn()) for (op, idx, params) in mods]
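
# Example (added for illustration; not called anywhere in this module):
#   _PrepareContainerMods([(constants.DDM_ADD, -1, {"size": 1024})],
#                         _InstNicModPrivate)
# returns [(constants.DDM_ADD, -1, {"size": 1024}, <_InstNicModPrivate>)],
# i.e. each 3-tuple is extended with a freshly constructed private object
# (or None when no private_fn is given).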


def _CheckNodesPhysicalCPUs(lu, node_uuids, requested, hypervisor_specs):
  """Checks if nodes have enough physical CPUs

  This function checks if all given nodes have the needed number of
  physical CPUs. In case any node has less CPUs or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type node_uuids: C{list}
  @param node_uuids: the list of node UUIDs to check
  @type requested: C{int}
  @param requested: the minimum acceptable number of physical CPUs
  @type hypervisor_specs: list of pairs (string, dict of strings)
  @param hypervisor_specs: list of hypervisor specifications in
      pairs (hypervisor_name, hvparams)
  @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
      or we cannot check the node

  """
  nodeinfo = lu.rpc.call_node_info(node_uuids, None, hypervisor_specs)
  for node_uuid in node_uuids:
    info = nodeinfo[node_uuid]
    node_name = lu.cfg.GetNodeName(node_uuid)
    info.Raise("Cannot get current information from node %s" % node_name,
               prereq=True, ecode=errors.ECODE_ENVIRON)
    (_, _, (hv_info, )) = info.payload
    num_cpus = hv_info.get("cpu_total", None)
    if not isinstance(num_cpus, int):
      raise errors.OpPrereqError("Can't compute the number of physical CPUs"
                                 " on node %s, result was '%s'" %
                                 (node_name, num_cpus), errors.ECODE_ENVIRON)
    if requested > num_cpus:
      raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
                                 "required" % (node_name, num_cpus, requested),
                                 errors.ECODE_NORES)
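
# Example (added; illustrative values): a typical hypervisor_specs argument
# for a KVM-only check would be roughly
#   [(constants.HT_KVM, cluster.hvparams[constants.HT_KVM])]
# and requested would be the number of physical CPUs the instance needs to
# pin, e.g. 4.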


def GetItemFromContainer(identifier, kind, container):
  """Return the item referred to by the identifier.

  @type identifier: string
  @param identifier: Item index or name or UUID
  @type kind: string
  @param kind: One-word item description
  @type container: list
  @param container: Container to get the item from

  """
  # Index
  try:
    idx = int(identifier)
    if idx == -1:
      # Append
      absidx = len(container) - 1
    elif idx < 0:
      raise IndexError("Not accepting negative indices other than -1")
    elif idx > len(container):
      raise IndexError("Got %s index %s, but there are only %s" %
                       (kind, idx, len(container)))
    else:
      absidx = idx
    return (absidx, container[idx])
  except ValueError:
    pass

  for idx, item in enumerate(container):
    if item.uuid == identifier or item.name == identifier:
      return (idx, item)

  raise errors.OpPrereqError("Cannot find %s with identifier %s" %
                             (kind, identifier), errors.ECODE_NOENT)
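
# Illustrative behaviour note (added): the identifier is tried as an integer
# index first and only then matched against item UUIDs and names, so for a
# hypothetical NIC container of length 2
#   GetItemFromContainer("-1", "nic", nics) -> (1, nics[-1])   # last item
#   GetItemFromContainer("0", "nic", nics)  -> (0, nics[0])
#   GetItemFromContainer("eth-front", "nic", nics) looks the name/UUID up.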


def _ApplyContainerMods(kind, container, chgdesc, mods,
                        create_fn, modify_fn, remove_fn,
                        post_add_fn=None):
  """Applies descriptions in C{mods} to C{container}.

  @type kind: string
  @param kind: One-word item description
  @type container: list
  @param container: Container to modify
  @type chgdesc: None or list
  @param chgdesc: List of applied changes
  @type mods: list
  @param mods: Modifications as returned by L{_PrepareContainerMods}
  @type create_fn: callable
  @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
    receives absolute item index, parameters and private data object as added
    by L{_PrepareContainerMods}, returns tuple containing new item and changes
    as list
  @type modify_fn: callable
  @param modify_fn: Callback for modifying an existing item
    (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
    and private data object as added by L{_PrepareContainerMods}, returns
    changes as list
  @type remove_fn: callable
  @param remove_fn: Callback on removing item; receives absolute item index,
    item and private data object as added by L{_PrepareContainerMods}
  @type post_add_fn: callable
  @param post_add_fn: Callable for post-processing a newly created item after
    it has been put into the container. It receives the index of the new item
    and the new item as parameters.

  """
  for (op, identifier, params, private) in mods:
    changes = None

    if op == constants.DDM_ADD:
      # Calculate where item will be added
      # When adding an item, identifier can only be an index
      try:
        idx = int(identifier)
      except ValueError:
        raise errors.OpPrereqError("Only positive integer or -1 is accepted as"
                                   " identifier for %s" % constants.DDM_ADD,
                                   errors.ECODE_INVAL)
      if idx == -1:
        addidx = len(container)
      else:
        if idx < 0:
          raise IndexError("Not accepting negative indices other than -1")
        elif idx > len(container):
          raise IndexError("Got %s index %s, but there are only %s" %
                           (kind, idx, len(container)))
        addidx = idx

      if create_fn is None:
        item = params
      else:
        (item, changes) = create_fn(addidx, params, private)

      if idx == -1:
        container.append(item)
      else:
        assert idx >= 0
        assert idx <= len(container)
        # list.insert does so before the specified index
        container.insert(idx, item)

      if post_add_fn is not None:
        post_add_fn(addidx, item)

    else:
      # Retrieve existing item
      (absidx, item) = GetItemFromContainer(identifier, kind, container)

      if op == constants.DDM_REMOVE:
        assert not params

        changes = [("%s/%s" % (kind, absidx), "remove")]

        if remove_fn is not None:
          msg = remove_fn(absidx, item, private)
          if msg:
            changes.append(("%s/%s" % (kind, absidx), msg))

        assert container[absidx] == item
        del container[absidx]
      elif op == constants.DDM_MODIFY:
        if modify_fn is not None:
          changes = modify_fn(absidx, item, params, private)
      else:
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)

    assert _TApplyContModsCbChanges(changes)

    if not (chgdesc is None or changes is None):
      chgdesc.extend(changes)
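
  # Usage sketch (added for illustration; callbacks are simplified): for NIC
  # modifications LUInstanceSetParams calls this roughly as
  #   _ApplyContainerMods("NIC", instance.nics, changes, nic_mods,
  #                       create_fn, modify_fn, remove_fn)
  # where each callback returns a list of (label, value) change pairs that
  # end up in chgdesc, e.g. [("NIC/0", "remove")] for a removal.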


def _UpdateIvNames(base_index, disks):
  """Updates the C{iv_name} attribute of disks.

  @type disks: list of L{objects.Disk}

  """
  for (idx, disk) in enumerate(disks):
    disk.iv_name = "disk/%s" % (base_index + idx, )
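
  # Example (added): _UpdateIvNames(2, [d1, d2]) sets the visible names to
  # "disk/2" and "disk/3"; it is intended to keep the iv_name sequence
  # contiguous after disks have been added or removed.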


class LUInstanceSetParams(LogicalUnit):
  """Modifies an instance's parameters.

  """
  HPATH = "instance-modify"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  @staticmethod
  def _UpgradeDiskNicMods(kind, mods, verify_fn):
    assert ht.TList(mods)
    assert not mods or len(mods[0]) in (2, 3)

    if mods and len(mods[0]) == 2:
      result = []

      addremove = 0
      for op, params in mods:
        if op in (constants.DDM_ADD, constants.DDM_REMOVE):
          result.append((op, -1, params))
          addremove += 1

          if addremove > 1:
            raise errors.OpPrereqError("Only one %s add or remove operation is"
                                       " supported at a time" % kind,
                                       errors.ECODE_INVAL)
        else:
          result.append((constants.DDM_MODIFY, op, params))

      assert verify_fn(result)
    else:
      result = mods

    return result
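
    # Illustrative note (added): this upgrades the legacy 2-tuple syntax to
    # the current 3-tuple form, e.g. a hypothetical
    #   [(constants.DDM_ADD, {"size": 1024})]
    # becomes
    #   [(constants.DDM_ADD, -1, {"size": 1024})]
    # while [(0, {"mode": "rw"})] turns into
    #   [(constants.DDM_MODIFY, 0, {"mode": "rw"})].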
2388

    
2389
  @staticmethod
2390
  def _CheckMods(kind, mods, key_types, item_fn):
2391
    """Ensures requested disk/NIC modifications are valid.
2392

2393
    """
2394
    for (op, _, params) in mods:
2395
      assert ht.TDict(params)
2396

    
2397
      # If 'key_types' is an empty dict, we assume we have an
2398
      # 'ext' template and thus do not ForceDictType
2399
      if key_types:
2400
        utils.ForceDictType(params, key_types)
2401

    
2402
      if op == constants.DDM_REMOVE:
2403
        if params:
2404
          raise errors.OpPrereqError("No settings should be passed when"
2405
                                     " removing a %s" % kind,
2406
                                     errors.ECODE_INVAL)
2407
      elif op in (constants.DDM_ADD, constants.DDM_MODIFY):
2408
        item_fn(op, params)
2409
      else:
2410
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2411

    
2412
  def _VerifyDiskModification(self, op, params, excl_stor):
2413
    """Verifies a disk modification.
2414

2415
    """
2416
    if op == constants.DDM_ADD:
2417
      mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
2418
      if mode not in constants.DISK_ACCESS_SET:
2419
        raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
2420
                                   errors.ECODE_INVAL)
2421

    
2422
      size = params.get(constants.IDISK_SIZE, None)
2423
      if size is None:
2424
        raise errors.OpPrereqError("Required disk parameter '%s' missing" %
2425
                                   constants.IDISK_SIZE, errors.ECODE_INVAL)
2426
      size = int(size)
2427

    
2428
      params[constants.IDISK_SIZE] = size
2429
      name = params.get(constants.IDISK_NAME, None)
2430
      if name is not None and name.lower() == constants.VALUE_NONE:
2431
        params[constants.IDISK_NAME] = None
2432

    
2433
      CheckSpindlesExclusiveStorage(params, excl_stor, True)
2434

    
2435
    elif op == constants.DDM_MODIFY:
2436
      if constants.IDISK_SIZE in params:
2437
        raise errors.OpPrereqError("Disk size change not possible, use"
2438
                                   " grow-disk", errors.ECODE_INVAL)
2439

    
2440
      # Disk modification supports changing only the disk name and mode.
2441
      # Changing arbitrary parameters is allowed only for ext disk template",
2442
      if self.instance.disk_template != constants.DT_EXT:
2443
        utils.ForceDictType(params, constants.MODIFIABLE_IDISK_PARAMS_TYPES)
2444

    
2445
      name = params.get(constants.IDISK_NAME, None)
2446
      if name is not None and name.lower() == constants.VALUE_NONE:
2447
        params[constants.IDISK_NAME] = None
2448

    
2449
  @staticmethod
2450
  def _VerifyNicModification(op, params):
2451
    """Verifies a network interface modification.
2452

2453
    """
2454
    if op in (constants.DDM_ADD, constants.DDM_MODIFY):
2455
      ip = params.get(constants.INIC_IP, None)
2456
      name = params.get(constants.INIC_NAME, None)
2457
      req_net = params.get(constants.INIC_NETWORK, None)
2458
      link = params.get(constants.NIC_LINK, None)
2459
      mode = params.get(constants.NIC_MODE, None)
2460
      if name is not None and name.lower() == constants.VALUE_NONE:
2461
        params[constants.INIC_NAME] = None
2462
      if req_net is not None:
2463
        if req_net.lower() == constants.VALUE_NONE:
2464
          params[constants.INIC_NETWORK] = None
2465
          req_net = None
2466
        elif link is not None or mode is not None:
2467
          raise errors.OpPrereqError("If network is given"
2468
                                     " mode or link should not",
2469
                                     errors.ECODE_INVAL)
2470

    
2471
      if op == constants.DDM_ADD:
2472
        macaddr = params.get(constants.INIC_MAC, None)
2473
        if macaddr is None:
2474
          params[constants.INIC_MAC] = constants.VALUE_AUTO
2475

    
2476
      if ip is not None:
2477
        if ip.lower() == constants.VALUE_NONE:
2478
          params[constants.INIC_IP] = None
2479
        else:
2480
          if ip.lower() == constants.NIC_IP_POOL:
2481
            if op == constants.DDM_ADD and req_net is None:
2482
              raise errors.OpPrereqError("If ip=pool, parameter network"
2483
                                         " cannot be none",
2484
                                         errors.ECODE_INVAL)
2485
          else:
2486
            if not netutils.IPAddress.IsValid(ip):
2487
              raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
2488
                                         errors.ECODE_INVAL)
2489

    
2490
      if constants.INIC_MAC in params:
2491
        macaddr = params[constants.INIC_MAC]
2492
        if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2493
          macaddr = utils.NormalizeAndValidateMac(macaddr)
2494

    
2495
        if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
2496
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
2497
                                     " modifying an existing NIC",
2498
                                     errors.ECODE_INVAL)
2499

    
2500
  def CheckArguments(self):
    if not (self.op.nics or self.op.disks or self.op.disk_template or
            self.op.hvparams or self.op.beparams or self.op.os_name or
            self.op.osparams or self.op.offline is not None or
            self.op.runtime_mem or self.op.pnode or self.op.osparams_private or
            self.op.instance_communication is not None):
      raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)

    if self.op.hvparams:
      CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
                           "hypervisor", "instance", "cluster")

    self.op.disks = self._UpgradeDiskNicMods(
      "disk", self.op.disks, ht.TSetParamsMods(ht.TIDiskParams))
    self.op.nics = self._UpgradeDiskNicMods(
      "NIC", self.op.nics, ht.TSetParamsMods(ht.TINicParams))

    if self.op.disks and self.op.disk_template is not None:
      raise errors.OpPrereqError("Disk template conversion and other disk"
                                 " changes not supported at the same time",
                                 errors.ECODE_INVAL)

    if (self.op.disk_template and
        self.op.disk_template in constants.DTS_INT_MIRROR and
        self.op.remote_node is None):
      raise errors.OpPrereqError("Changing the disk template to a mirrored"
                                 " one requires specifying a secondary node",
                                 errors.ECODE_INVAL)

    # Check NIC modifications
    self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
                    self._VerifyNicModification)

    if self.op.pnode:
      (self.op.pnode_uuid, self.op.pnode) = \
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)

  def ExpandNames(self):
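    """Declare the lock levels needed for modifying the instance.

    Node and node-resource locks are only filled in later, in DeclareLocks,
    once the instance lock is held.

    """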
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODEGROUP] = []
    # Can't even acquire node locks in shared mode as upcoming changes in
    # Ganeti 2.6 will start to modify the node object on disk conversion
    self.needed_locks[locking.LEVEL_NODE] = []
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
    # Lock the node group in shared mode to look up the ipolicy
    self.share_locks[locking.LEVEL_NODEGROUP] = 1

  def DeclareLocks(self, level):
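    """Fill in the node group and node locks for the locked instance.

    """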
    if level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
      # Acquire locks for the instance's nodegroups optimistically. Needs
      # to be verified in CheckPrereq
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
        self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
    elif level == locking.LEVEL_NODE:
      self._LockInstancesNodes()
      if self.op.disk_template and self.op.remote_node:
        (self.op.remote_node_uuid, self.op.remote_node) = \
          ExpandNodeUuidAndName(self.cfg, self.op.remote_node_uuid,
                                self.op.remote_node)
        self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node_uuid)
    elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, primary and secondaries.

    """
    args = {}
    if constants.BE_MINMEM in self.be_new:
      args["minmem"] = self.be_new[constants.BE_MINMEM]
    if constants.BE_MAXMEM in self.be_new:
      args["maxmem"] = self.be_new[constants.BE_MAXMEM]
    if constants.BE_VCPUS in self.be_new:
      args["vcpus"] = self.be_new[constants.BE_VCPUS]
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
    # information at all.

    if self._new_nics is not None:
      nics = []

      for nic in self._new_nics:
        n = copy.deepcopy(nic)
        nicparams = self.cluster.SimpleFillNIC(n.nicparams)
        n.nicparams = nicparams
        nics.append(NICToTuple(self, n))

      args["nics"] = nics

    env = BuildInstanceHookEnvByObject(self, self.instance, override=args)
    if self.op.disk_template:
      env["NEW_DISK_TEMPLATE"] = self.op.disk_template
    if self.op.runtime_mem:
      env["RUNTIME_MEMORY"] = self.op.runtime_mem

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)

  def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
                              old_params, cluster, pnode_uuid):
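    """Prepare a single NIC modification (called from CheckPrereq).

    Validates the requested parameters against the old/new network,
    generates or reserves MAC and IP addresses as needed, and stores the
    resulting parameter dicts in C{private} for later application.

    """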

    update_params_dict = dict([(key, params[key])
                               for key in constants.NICS_PARAMETERS
                               if key in params])

    req_link = update_params_dict.get(constants.NIC_LINK, None)
    req_mode = update_params_dict.get(constants.NIC_MODE, None)

    new_net_uuid = None
    new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid)
    if new_net_uuid_or_name:
      new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name)
      new_net_obj = self.cfg.GetNetwork(new_net_uuid)

    if old_net_uuid:
      old_net_obj = self.cfg.GetNetwork(old_net_uuid)

    if new_net_uuid:
      netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode_uuid)
      if not netparams:
        raise errors.OpPrereqError("No netparams found for the network"
                                   " %s, probably not connected" %
                                   new_net_obj.name, errors.ECODE_INVAL)
      new_params = dict(netparams)
    else:
      new_params = GetUpdatedParams(old_params, update_params_dict)

    utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)

    new_filled_params = cluster.SimpleFillNIC(new_params)
    objects.NIC.CheckParameterSyntax(new_filled_params)

    new_mode = new_filled_params[constants.NIC_MODE]
    if new_mode == constants.NIC_MODE_BRIDGED:
      bridge = new_filled_params[constants.NIC_LINK]
      msg = self.rpc.call_bridges_exist(pnode_uuid, [bridge]).fail_msg
      if msg:
        msg = "Error checking bridges on node '%s': %s" % \
                (self.cfg.GetNodeName(pnode_uuid), msg)
        if self.op.force:
          self.warn.append(msg)
        else:
          raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)

    elif new_mode == constants.NIC_MODE_ROUTED:
      ip = params.get(constants.INIC_IP, old_ip)
      if ip is None:
        raise errors.OpPrereqError("Cannot set the NIC IP address to None"
                                   " on a routed NIC", errors.ECODE_INVAL)

    elif new_mode == constants.NIC_MODE_OVS:
      # TODO: check OVS link
      self.LogInfo("OVS links are currently not checked for correctness")

    if constants.INIC_MAC in params:
      mac = params[constants.INIC_MAC]
      if mac is None:
        raise errors.OpPrereqError("Cannot unset the NIC MAC address",
                                   errors.ECODE_INVAL)
      elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        # otherwise generate the MAC address
        params[constants.INIC_MAC] = \
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
      else:
        # or validate/reserve the current one
        try:
          self.cfg.ReserveMAC(mac, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("MAC address '%s' already in use"
                                     " in cluster" % mac,
                                     errors.ECODE_NOTUNIQUE)
    elif new_net_uuid != old_net_uuid:

      def get_net_prefix(net_uuid):
        mac_prefix = None
        if net_uuid:
          nobj = self.cfg.GetNetwork(net_uuid)
          mac_prefix = nobj.mac_prefix

        return mac_prefix

      new_prefix = get_net_prefix(new_net_uuid)
      old_prefix = get_net_prefix(old_net_uuid)
      if old_prefix != new_prefix:
        params[constants.INIC_MAC] = \
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())

    # if there is a change in (ip, network) tuple
    new_ip = params.get(constants.INIC_IP, old_ip)
    if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
      if new_ip:
        # if IP is pool then require a network and generate one IP
        if new_ip.lower() == constants.NIC_IP_POOL:
          if new_net_uuid:
            try:
              new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("Unable to get a free IP"
                                         " from the address pool",
                                         errors.ECODE_STATE)
            self.LogInfo("Chose IP %s from network %s",
                         new_ip,
                         new_net_obj.name)
            params[constants.INIC_IP] = new_ip
          else:
            raise errors.OpPrereqError("ip=pool, but no network found",
                                       errors.ECODE_INVAL)
        # Reserve the new IP in the new network, if any
        elif new_net_uuid:
          try:
            self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId(),
                               check=self.op.conflicts_check)
            self.LogInfo("Reserving IP %s in network %s",
                         new_ip, new_net_obj.name)
          except errors.ReservationError:
            raise errors.OpPrereqError("IP %s not available in network %s" %
                                       (new_ip, new_net_obj.name),
                                       errors.ECODE_NOTUNIQUE)
        # new network is None so check if new IP is a conflicting IP
        elif self.op.conflicts_check:
          _CheckForConflictingIp(self, new_ip, pnode_uuid)

      # release the old IP if the old network is not None
      if old_ip and old_net_uuid:
        try:
          self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId())
        except errors.AddressPoolError:
          logging.warning("Release IP %s not contained in network %s",
                          old_ip, old_net_obj.name)

    # there are no changes in (ip, network) tuple and old network is not None
    elif (old_net_uuid is not None and
          (req_link is not None or req_mode is not None)):
      raise errors.OpPrereqError("Not allowed to change link or mode of"
                                 " a NIC that is connected to a network",
                                 errors.ECODE_INVAL)

    private.params = new_params
    private.filled = new_filled_params

  def _PreCheckDiskTemplate(self, pnode_info):
    """CheckPrereq checks related to a new disk template."""
    # Arguments are passed to avoid configuration lookups
    pnode_uuid = self.instance.primary_node
    if self.instance.disk_template == self.op.disk_template:
      raise errors.OpPrereqError("Instance already has disk template %s" %
                                 self.instance.disk_template,
                                 errors.ECODE_INVAL)

    if not self.cluster.IsDiskTemplateEnabled(self.op.disk_template):
      raise errors.OpPrereqError("Disk template '%s' is not enabled for this"
                                 " cluster." % self.op.disk_template)

    if (self.instance.disk_template,
        self.op.disk_template) not in self._DISK_CONVERSIONS:
      raise errors.OpPrereqError("Unsupported disk template conversion from"
                                 " %s to %s" % (self.instance.disk_template,
                                                self.op.disk_template),
                                 errors.ECODE_INVAL)
    CheckInstanceState(self, self.instance, INSTANCE_DOWN,
                       msg="cannot change disk template")
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.remote_node_uuid == pnode_uuid:
        raise errors.OpPrereqError("Given new secondary node %s is the same"
                                   " as the primary node of the instance" %
                                   self.op.remote_node, errors.ECODE_STATE)
      CheckNodeOnline(self, self.op.remote_node_uuid)
      CheckNodeNotDrained(self, self.op.remote_node_uuid)
      # FIXME: here we assume that the old instance type is DT_PLAIN
      assert self.instance.disk_template == constants.DT_PLAIN
      disks = [{constants.IDISK_SIZE: d.size,
                constants.IDISK_VG: d.logical_id[0]}
               for d in self.instance.disks]
      required = ComputeDiskSizePerVG(self.op.disk_template, disks)
      CheckNodesFreeDiskPerVG(self, [self.op.remote_node_uuid], required)

      snode_info = self.cfg.GetNodeInfo(self.op.remote_node_uuid)
      snode_group = self.cfg.GetNodeGroup(snode_info.group)
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
                                                              snode_group)
      CheckTargetNodeIPolicy(self, ipolicy, self.instance, snode_info, self.cfg,
                             ignore=self.op.ignore_ipolicy)
      if pnode_info.group != snode_info.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    if not self.op.disk_template in constants.DTS_EXCL_STORAGE:
      # Make sure none of the nodes require exclusive storage
      nodes = [pnode_info]
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        assert snode_info
        nodes.append(snode_info)
      has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
      if compat.any(map(has_es, nodes)):
        errmsg = ("Cannot convert disk template from %s to %s when exclusive"
                  " storage is enabled" % (self.instance.disk_template,
                                           self.op.disk_template))
        raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)

  def _PreCheckDisks(self, ispec):
    """CheckPrereq checks related to disk changes.

    @type ispec: dict
    @param ispec: instance specs to be updated with the new disks

    """
    self.diskparams = self.cfg.GetInstanceDiskParams(self.instance)

    excl_stor = compat.any(
      rpc.GetExclusiveStorageForNodes(self.cfg,
                                      self.instance.all_nodes).values()
      )

    # Check disk modifications. This is done here and not in CheckArguments
    # (as with NICs), because we need to know the instance's disk template
    ver_fn = lambda op, par: self._VerifyDiskModification(op, par, excl_stor)
    if self.instance.disk_template == constants.DT_EXT:
      self._CheckMods("disk", self.op.disks, {}, ver_fn)
    else:
      self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
                      ver_fn)

    self.diskmod = _PrepareContainerMods(self.op.disks, None)

    # Check the validity of the `provider' parameter
    if self.instance.disk_template == constants.DT_EXT:
      for mod in self.diskmod:
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
        if mod[0] == constants.DDM_ADD:
          if ext_provider is None:
            raise errors.OpPrereqError("Instance template is '%s' and parameter"
                                       " '%s' missing, during disk add" %
                                       (constants.DT_EXT,
                                        constants.IDISK_PROVIDER),
                                       errors.ECODE_NOENT)
        elif mod[0] == constants.DDM_MODIFY:
          if ext_provider:
            raise errors.OpPrereqError("Parameter '%s' is invalid during disk"
                                       " modification" %
                                       constants.IDISK_PROVIDER,
                                       errors.ECODE_INVAL)
    else:
      for mod in self.diskmod:
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
        if ext_provider is not None:
          raise errors.OpPrereqError("Parameter '%s' is only valid for"
                                     " instances of type '%s'" %
                                     (constants.IDISK_PROVIDER,
                                      constants.DT_EXT),
                                     errors.ECODE_INVAL)

    if not self.op.wait_for_sync and self.instance.disks_active:
      for mod in self.diskmod:
        if mod[0] == constants.DDM_ADD:
          raise errors.OpPrereqError("Can't add a disk to an instance with"
                                     " activated disks and"
                                     " --no-wait-for-sync given.",
                                     errors.ECODE_INVAL)

    if self.op.disks and self.instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Disk operations not supported for"
                                 " diskless instances", errors.ECODE_INVAL)

    def _PrepareDiskMod(_, disk, params, __):
      disk.name = params.get(constants.IDISK_NAME, None)

    # Verify disk changes (operating on a copy)
    disks = copy.deepcopy(self.instance.disks)
    _ApplyContainerMods("disk", disks, None, self.diskmod, None,
                        _PrepareDiskMod, None)
    utils.ValidateDeviceNames("disk", disks)
    if len(disks) > constants.MAX_DISKS:
      raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
                                 " more" % constants.MAX_DISKS,
                                 errors.ECODE_STATE)
    disk_sizes = [disk.size for disk in self.instance.disks]
    disk_sizes.extend(params["size"] for (op, idx, params, private) in
                      self.diskmod if op == constants.DDM_ADD)
    ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
    ispec[constants.ISPEC_DISK_SIZE] = disk_sizes

    if self.op.offline is not None and self.op.offline:
      CheckInstanceState(self, self.instance, CAN_CHANGE_INSTANCE_OFFLINE,
                         msg="can't change to offline")

  @staticmethod
  def _InstanceCommunicationDDM(cfg, instance_communication, instance):
    """Create a NIC mod that adds the instance communication NIC to, or
    removes it from, a running instance.

    The NICS are dynamically created using the Dynamic Device
    Modification (DDM).  This function produces a NIC modification
    (mod) that inserts an additional NIC meant for instance
    communication in or removes an existing instance communication NIC
    from a running instance, using DDM.

    @type cfg: L{config.ConfigWriter}
    @param cfg: cluster configuration

    @type instance_communication: boolean
    @param instance_communication: whether instance communication is
                                   enabled or disabled

    @type instance: L{objects.Instance}
    @param instance: instance to which the NIC mod will be applied

    @rtype: (L{constants.DDM_ADD}, -1, parameters) or
            (L{constants.DDM_REMOVE}, -1, parameters) or
            L{None}
    @return: DDM mod containing an action to add or remove the NIC, or
             None if nothing needs to be done

    """
    nic_name = "%s%s" % (constants.INSTANCE_COMMUNICATION_NIC_PREFIX,
                         instance.name)

    instance_communication_nic = None

    for nic in instance.nics:
      if nic.name == nic_name:
        instance_communication_nic = nic
        break

    if instance_communication and not instance_communication_nic:
      action = constants.DDM_ADD
      params = {constants.INIC_NAME: nic_name,
                constants.INIC_MAC: constants.VALUE_GENERATE,
                constants.INIC_IP: constants.NIC_IP_POOL,
                constants.INIC_NETWORK:
                  cfg.GetInstanceCommunicationNetwork()}
    elif not instance_communication and instance_communication_nic:
      action = constants.DDM_REMOVE
      params = None
    else:
      action = None
      params = None

    if action is not None:
      return (action, -1, params)
    else:
      return None

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the instance list against the existing names.

    """
    assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    self.cluster = self.cfg.GetClusterInfo()
    cluster_hvparams = self.cluster.hvparams[self.instance.hypervisor]

    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    pnode_uuid = self.instance.primary_node

    self.warn = []

    if (self.op.pnode_uuid is not None and self.op.pnode_uuid != pnode_uuid and
        not self.op.force):
      # verify that the instance is not up
      instance_info = self.rpc.call_instance_info(
          pnode_uuid, self.instance.name, self.instance.hypervisor,
          cluster_hvparams)
      if instance_info.fail_msg:
        self.warn.append("Can't get instance runtime information: %s" %
                         instance_info.fail_msg)
      elif instance_info.payload:
        raise errors.OpPrereqError("Instance is still running on %s" %
                                   self.cfg.GetNodeName(pnode_uuid),
                                   errors.ECODE_STATE)

    assert pnode_uuid in self.owned_locks(locking.LEVEL_NODE)
    node_uuids = list(self.instance.all_nodes)
    pnode_info = self.cfg.GetNodeInfo(pnode_uuid)

    #_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
    assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
    group_info = self.cfg.GetNodeGroup(pnode_info.group)

    # dictionary with instance information after the modification
    ispec = {}

    if self.op.hotplug or self.op.hotplug_if_possible:
      result = self.rpc.call_hotplug_supported(self.instance.primary_node,
                                               self.instance)
      if result.fail_msg:
        if self.op.hotplug:
          result.Raise("Hotplug is not possible: %s" % result.fail_msg,
                       prereq=True)
        else:
          self.LogWarning(result.fail_msg)
          self.op.hotplug = False
          self.LogInfo("Modification will take place without hotplugging.")
      else:
        self.op.hotplug = True

    # Prepare NIC modifications
    # add or remove NIC for instance communication
    if self.op.instance_communication is not None:
      mod = self._InstanceCommunicationDDM(self.cfg,
                                           self.op.instance_communication,
                                           self.instance)
      if mod is not None:
        self.op.nics.append(mod)

    self.nicmod = _PrepareContainerMods(self.op.nics, _InstNicModPrivate)

    # OS change
    if self.op.os_name and not self.op.force:
      CheckNodeHasOS(self, self.instance.primary_node, self.op.os_name,
                     self.op.force_variant)
      instance_os = self.op.os_name
    else:
      instance_os = self.instance.os

    assert not (self.op.disk_template and self.op.disks), \
      "Can't modify disk template and apply disk changes at the same time"

    if self.op.disk_template:
      self._PreCheckDiskTemplate(pnode_info)

    self._PreCheckDisks(ispec)

    # hvparams processing
    if self.op.hvparams:
      hv_type = self.instance.hypervisor
      i_hvdict = GetUpdatedParams(self.instance.hvparams, self.op.hvparams)
      utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
      hv_new = self.cluster.SimpleFillHV(hv_type, self.instance.os, i_hvdict)

      # local check
      hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
      CheckHVParams(self, node_uuids, self.instance.hypervisor, hv_new)
      self.hv_proposed = self.hv_new = hv_new # the new actual values
      self.hv_inst = i_hvdict # the new dict (without defaults)
    else:
      self.hv_proposed = self.cluster.SimpleFillHV(self.instance.hypervisor,
                                                   self.instance.os,
                                                   self.instance.hvparams)
      self.hv_new = self.hv_inst = {}

    # beparams processing
    if self.op.beparams:
      i_bedict = GetUpdatedParams(self.instance.beparams, self.op.beparams,
                                  use_none=True)
      objects.UpgradeBeParams(i_bedict)
      utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
      be_new = self.cluster.SimpleFillBE(i_bedict)
      self.be_proposed = self.be_new = be_new # the new actual values
      self.be_inst = i_bedict # the new dict (without defaults)
    else:
      self.be_new = self.be_inst = {}
      self.be_proposed = self.cluster.SimpleFillBE(self.instance.beparams)
    be_old = self.cluster.FillBE(self.instance)

    # CPU param validation -- checking every time a parameter is
    # changed to cover all cases where either CPU mask or vcpus have
    # changed
    if (constants.BE_VCPUS in self.be_proposed and
        constants.HV_CPU_MASK in self.hv_proposed):
      cpu_list = \
        utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
      # Verify mask is consistent with number of vCPUs. Can skip this
      # test if only 1 entry in the CPU mask, which means same mask
      # is applied to all vCPUs.
      if (len(cpu_list) > 1 and
          len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
        raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
                                   " CPU mask [%s]" %
                                   (self.be_proposed[constants.BE_VCPUS],
                                    self.hv_proposed[constants.HV_CPU_MASK]),
                                   errors.ECODE_INVAL)

      # Only perform this test if a new CPU mask is given
      if constants.HV_CPU_MASK in self.hv_new:
        # Calculate the largest CPU number requested
        max_requested_cpu = max(map(max, cpu_list))
        # Check that all of the instance's nodes have enough physical CPUs to
        # satisfy the requested CPU mask
        hvspecs = [(self.instance.hypervisor,
                    self.cfg.GetClusterInfo()
                      .hvparams[self.instance.hypervisor])]
        _CheckNodesPhysicalCPUs(self, self.instance.all_nodes,
                                max_requested_cpu + 1,
                                hvspecs)

    # osparams processing
    if self.op.osparams or self.op.osparams_private:
      public_parms = self.op.osparams or {}
      private_parms = self.op.osparams_private or {}
      dupe_keys = utils.GetRepeatedKeys(public_parms, private_parms)

      if dupe_keys:
        raise errors.OpPrereqError("OS parameters repeated multiple times: %s" %
                                   utils.CommaJoin(dupe_keys))

      self.os_inst = GetUpdatedParams(self.instance.osparams,
                                      public_parms)
      self.os_inst_private = GetUpdatedParams(self.instance.osparams_private,
                                              private_parms)

      CheckOSParams(self, True, node_uuids, instance_os,
                    objects.FillDict(self.os_inst,
                                     self.os_inst_private))

    else:
      self.os_inst = {}
      self.os_inst_private = {}

    #TODO(dynmem): do the appropriate check involving MINMEM
    if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
        be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
      mem_check_list = [pnode_uuid]
      if be_new[constants.BE_AUTO_BALANCE]:
        # either we changed auto_balance to yes or it was from before
        mem_check_list.extend(self.instance.secondary_nodes)
      instance_info = self.rpc.call_instance_info(
          pnode_uuid, self.instance.name, self.instance.hypervisor,
          cluster_hvparams)
      hvspecs = [(self.instance.hypervisor,
                  cluster_hvparams)]
      nodeinfo = self.rpc.call_node_info(mem_check_list, None,
                                         hvspecs)
      pninfo = nodeinfo[pnode_uuid]
      msg = pninfo.fail_msg
      if msg:
        # Assume the primary node is unreachable and go ahead
        self.warn.append("Can't get info from primary node %s: %s" %
                         (self.cfg.GetNodeName(pnode_uuid), msg))
      else:
        (_, _, (pnhvinfo, )) = pninfo.payload
        if not isinstance(pnhvinfo.get("memory_free", None), int):
          self.warn.append("Node data from primary node %s doesn't contain"
                           " free memory information" %
                           self.cfg.GetNodeName(pnode_uuid))
        elif instance_info.fail_msg:
          self.warn.append("Can't get instance runtime information: %s" %
                           instance_info.fail_msg)
        else:
          if instance_info.payload:
            current_mem = int(instance_info.payload["memory"])
          else:
            # Assume instance not running
            # (there is a slight race condition here, but it's not very
            # probable, and we have no other way to check)
            # TODO: Describe race condition
            current_mem = 0
          #TODO(dynmem): do the appropriate check involving MINMEM
          miss_mem = (be_new[constants.BE_MAXMEM] - current_mem -
                      pnhvinfo["memory_free"])
          if miss_mem > 0:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from starting, due to %d MB of memory"
                                       " missing on its primary node" %
                                       miss_mem, errors.ECODE_NORES)

      if be_new[constants.BE_AUTO_BALANCE]:
        for node_uuid, nres in nodeinfo.items():
          if node_uuid not in self.instance.secondary_nodes:
            continue
          nres.Raise("Can't get info from secondary node %s" %
                     self.cfg.GetNodeName(node_uuid), prereq=True,
                     ecode=errors.ECODE_STATE)
          (_, _, (nhvinfo, )) = nres.payload
          if not isinstance(nhvinfo.get("memory_free", None), int):
            raise errors.OpPrereqError("Secondary node %s didn't return free"
                                       " memory information" %
                                       self.cfg.GetNodeName(node_uuid),
                                       errors.ECODE_STATE)
          #TODO(dynmem): do the appropriate check involving MINMEM
          elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from failover to its secondary node"
                                       " %s, due to not enough memory" %
                                       self.cfg.GetNodeName(node_uuid),
                                       errors.ECODE_STATE)

    if self.op.runtime_mem:
      remote_info = self.rpc.call_instance_info(
         self.instance.primary_node, self.instance.name,
         self.instance.hypervisor,
         cluster_hvparams)
      remote_info.Raise("Error checking node %s" %
                        self.cfg.GetNodeName(self.instance.primary_node))
      if not remote_info.payload: # not running already
        raise errors.OpPrereqError("Instance %s is not running" %
                                   self.instance.name, errors.ECODE_STATE)

      current_memory = remote_info.payload["memory"]
      if (not self.op.force and
           (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
            self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
        raise errors.OpPrereqError("Instance %s must have memory between %d"
                                   " and %d MB of memory unless --force is"
                                   " given" %
                                   (self.instance.name,
                                    self.be_proposed[constants.BE_MINMEM],
                                    self.be_proposed[constants.BE_MAXMEM]),
                                   errors.ECODE_INVAL)

      delta = self.op.runtime_mem - current_memory
      if delta > 0:
        CheckNodeFreeMemory(
            self, self.instance.primary_node,
            "ballooning memory for instance %s" % self.instance.name, delta,
            self.instance.hypervisor,
            self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])

    # make self.cluster visible in the functions below
    cluster = self.cluster

    def _PrepareNicCreate(_, params, private):
      self._PrepareNicModification(params, private, None, None,
                                   {}, cluster, pnode_uuid)
      return (None, None)

    def _PrepareNicMod(_, nic, params, private):
      self._PrepareNicModification(params, private, nic.ip, nic.network,
                                   nic.nicparams, cluster, pnode_uuid)
      return None

    def _PrepareNicRemove(_, params, __):
      ip = params.ip
      net = params.network
      if net is not None and ip is not None:
        self.cfg.ReleaseIp(net, ip, self.proc.GetECId())

    # Verify NIC changes (operating on copy)
    nics = [nic.Copy() for nic in self.instance.nics]
    _ApplyContainerMods("NIC", nics, None, self.nicmod,
                        _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
    if len(nics) > constants.MAX_NICS:
      raise errors.OpPrereqError("Instance has too many network interfaces"
                                 " (%d), cannot add more" % constants.MAX_NICS,
                                 errors.ECODE_STATE)

    # Pre-compute NIC changes (necessary to use result in hooks)
    self._nic_chgdesc = []
    if self.nicmod:
      # Operate on copies as this is still in prereq
      nics = [nic.Copy() for nic in self.instance.nics]
      _ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
                          self._CreateNewNic, self._ApplyNicMods,
                          self._RemoveNic)
      # Verify that NIC names are unique and valid
      utils.ValidateDeviceNames("NIC", nics)
      self._new_nics = nics
      ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
    else:
      self._new_nics = None
      ispec[constants.ISPEC_NIC_COUNT] = len(self.instance.nics)

    if not self.op.ignore_ipolicy:
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
                                                              group_info)

      # Fill ispec with backend parameters
      ispec[constants.ISPEC_SPINDLE_USE] = \
        self.be_new.get(constants.BE_SPINDLE_USE, None)
      ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS,
                                                         None)

      # Copy ispec to verify parameters with min/max values separately
      if self.op.disk_template:
        new_disk_template = self.op.disk_template
      else:
        new_disk_template = self.instance.disk_template
      ispec_max = ispec.copy()
      ispec_max[constants.ISPEC_MEM_SIZE] = \
        self.be_new.get(constants.BE_MAXMEM, None)
      res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,
                                                     new_disk_template)
      ispec_min = ispec.copy()
      ispec_min[constants.ISPEC_MEM_SIZE] = \
        self.be_new.get(constants.BE_MINMEM, None)
      res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,
                                                     new_disk_template)

      if (res_max or res_min):
        # FIXME: Improve error message by including information about whether
        # the upper or lower limit of the parameter fails the ipolicy.
        msg = ("Instance allocation to group %s (%s) violates policy: %s" %
               (group_info, group_info.name,
                utils.CommaJoin(set(res_max + res_min))))
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

  def _ConvertPlainToDrbd(self, feedback_fn):
    """Converts an instance from plain to drbd.

    """
    feedback_fn("Converting template to drbd")
    pnode_uuid = self.instance.primary_node
    snode_uuid = self.op.remote_node_uuid

    assert self.instance.disk_template == constants.DT_PLAIN

    # create a fake disk info for _GenerateDiskTemplate
    disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
                  constants.IDISK_VG: d.logical_id[0],
                  constants.IDISK_NAME: d.name}
                 for d in self.instance.disks]
    new_disks = GenerateDiskTemplate(self, self.op.disk_template,
                                     self.instance.uuid, pnode_uuid,
                                     [snode_uuid], disk_info, None, None, 0,
                                     feedback_fn, self.diskparams)
    anno_disks = rpc.AnnotateDiskParams(new_disks, self.diskparams)
    p_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, pnode_uuid)
    s_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, snode_uuid)
    info = GetInstanceInfoText(self.instance)
    feedback_fn("Creating additional volumes...")
    # first, create the missing data and meta devices
    for disk in anno_disks:
      # unfortunately this is... not too nice
      CreateSingleBlockDev(self, pnode_uuid, self.instance, disk.children[1],
                           info, True, p_excl_stor)
      for child in disk.children:
        CreateSingleBlockDev(self, snode_uuid, self.instance, child, info, True,
                             s_excl_stor)
    # at this stage, all new LVs have been created, we can rename the
    # old ones
    feedback_fn("Renaming original volumes...")
    rename_list = [(o, n.children[0].logical_id)
                   for (o, n) in zip(self.instance.disks, new_disks)]
    result = self.rpc.call_blockdev_rename(pnode_uuid, rename_list)
    result.Raise("Failed to rename original LVs")

    feedback_fn("Initializing DRBD devices...")
    # all child devices are in place, we can now create the DRBD devices
    try:
      for disk in anno_disks:
        for (node_uuid, excl_stor) in [(pnode_uuid, p_excl_stor),
                                       (snode_uuid, s_excl_stor)]:
          f_create = node_uuid == pnode_uuid
          CreateSingleBlockDev(self, node_uuid, self.instance, disk, info,
                               f_create, excl_stor)
    except errors.GenericError, e:
      feedback_fn("Initializing of DRBD devices failed;"
                  " renaming back original volumes...")
      rename_back_list = [(n.children[0], o.logical_id)
                          for (n, o) in zip(new_disks, self.instance.disks)]
      result = self.rpc.call_blockdev_rename(pnode_uuid, rename_back_list)
      result.Raise("Failed to rename LVs back after error %s" % str(e))
      raise

    # at this point, the instance has been modified
    self.instance.disk_template = constants.DT_DRBD8
    self.instance.disks = new_disks
    self.cfg.Update(self.instance, feedback_fn)

    # Release node locks while waiting for sync
    ReleaseLocks(self, locking.LEVEL_NODE)

    # disks are created, waiting for sync
    disk_abort = not WaitForSync(self, self.instance,
                                 oneshot=not self.op.wait_for_sync)
    if disk_abort:
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance, please cleanup manually")

    # Node resource locks will be released by caller

  def _ConvertDrbdToPlain(self, feedback_fn):
    """Converts an instance from drbd to plain.

    """
    assert len(self.instance.secondary_nodes) == 1
    assert self.instance.disk_template == constants.DT_DRBD8

    pnode_uuid = self.instance.primary_node
    snode_uuid = self.instance.secondary_nodes[0]
    feedback_fn("Converting template to plain")

    old_disks = AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
    new_disks = [d.children[0] for d in self.instance.disks]

    # copy over size, mode and name
    for parent, child in zip(old_disks, new_disks):
      child.size = parent.size
      child.mode = parent.mode
      child.name = parent.name

    # this is a DRBD disk, return its port to the pool
    # NOTE: this must be done right before the call to cfg.Update!
    for disk in old_disks:
      tcp_port = disk.logical_id[2]
      self.cfg.AddTcpUdpPort(tcp_port)

    # update instance structure
    self.instance.disks = new_disks
    self.instance.disk_template = constants.DT_PLAIN
    _UpdateIvNames(0, self.instance.disks)
    self.cfg.Update(self.instance, feedback_fn)

    # Release locks in case removing disks takes a while
    ReleaseLocks(self, locking.LEVEL_NODE)

    feedback_fn("Removing volumes on the secondary node...")
    for disk in old_disks:
      result = self.rpc.call_blockdev_remove(snode_uuid, (disk, self.instance))
      result.Warn("Could not remove block device %s on node %s,"
                  " continuing anyway" %
                  (disk.iv_name, self.cfg.GetNodeName(snode_uuid)),
                  self.LogWarning)

    feedback_fn("Removing unneeded volumes on the primary node...")
    for idx, disk in enumerate(old_disks):
      meta = disk.children[1]
      result = self.rpc.call_blockdev_remove(pnode_uuid, (meta, self.instance))
      result.Warn("Could not remove metadata for disk %d on node %s,"
                  " continuing anyway" %
                  (idx, self.cfg.GetNodeName(pnode_uuid)),
                  self.LogWarning)

  def _HotplugDevice(self, action, dev_type, device, extra, seq):
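    """Call the hotplug RPC for a single device and report the outcome.

    Failures are logged as warnings only; the returned message
    ("hotplug:done" or "hotplug:failed") is recorded in the change list.

    """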
    self.LogInfo("Trying to hotplug device...")
    msg = "hotplug:"
    result = self.rpc.call_hotplug_device(self.instance.primary_node,
                                          self.instance, action, dev_type,
                                          (device, self.instance),
                                          extra, seq)
    if result.fail_msg:
      self.LogWarning("Could not hotplug device: %s" % result.fail_msg)
      self.LogInfo("Continuing execution..")
      msg += "failed"
    else:
      self.LogInfo("Hotplug done.")
      msg += "done"
    return msg

  def _CreateNewDisk(self, idx, params, _):
    """Creates a new disk.

    """
    # add a new disk
    if self.instance.disk_template in constants.DTS_FILEBASED:
      (file_driver, file_path) = self.instance.disks[0].logical_id
      file_path = os.path.dirname(file_path)
    else:
      file_driver = file_path = None

    disk = \
      GenerateDiskTemplate(self, self.instance.disk_template,
                           self.instance.uuid, self.instance.primary_node,
                           self.instance.secondary_nodes, [params], file_path,
                           file_driver, idx, self.Log, self.diskparams)[0]

    new_disks = CreateDisks(self, self.instance, disks=[disk])

    if self.cluster.prealloc_wipe_disks:
      # Wipe new disk
      WipeOrCleanupDisks(self, self.instance,
                         disks=[(idx, disk, 0)],
                         cleanup=new_disks)

    changes = [
      ("disk/%d" % idx,
       "add:size=%s,mode=%s" % (disk.size, disk.mode)),
      ]
    if self.op.hotplug:
      result = self.rpc.call_blockdev_assemble(self.instance.primary_node,
                                               (disk, self.instance),
                                               self.instance.name, True, idx)
      if result.fail_msg:
        changes.append(("disk/%d" % idx, "assemble:failed"))
        self.LogWarning("Can't assemble newly created disk %d: %s",
                        idx, result.fail_msg)
      else:
        _, link_name = result.payload
        msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
                                  constants.HOTPLUG_TARGET_DISK,
                                  disk, link_name, idx)
        changes.append(("disk/%d" % idx, msg))

    return (disk, changes)

  def _PostAddDisk(self, _, disk):
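    """Wait for a newly added disk to sync.

    If the instance's disks are meant to be inactive, the new (and now
    active) disk is shut down again afterwards.

    """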
    if not WaitForSync(self, self.instance, disks=[disk],
                       oneshot=not self.op.wait_for_sync):
      raise errors.OpExecError("Failed to sync disks of %s" %
                               self.instance.name)

    # the disk is active at this point, so deactivate it if the instance disks
    # are supposed to be inactive
    if not self.instance.disks_active:
      ShutdownInstanceDisks(self, self.instance, disks=[disk])

  def _ModifyDisk(self, idx, disk, params, _):
    """Modifies a disk.

    """
    changes = []
    if constants.IDISK_MODE in params:
      disk.mode = params.get(constants.IDISK_MODE)
      changes.append(("disk.mode/%d" % idx, disk.mode))

    if constants.IDISK_NAME in params:
      disk.name = params.get(constants.IDISK_NAME)
      changes.append(("disk.name/%d" % idx, disk.name))

    # Modify arbitrary params in case instance template is ext
    for key, value in params.iteritems():
      if (key not in constants.MODIFIABLE_IDISK_PARAMS and
          self.instance.disk_template == constants.DT_EXT):
        # stolen from GetUpdatedParams: default means reset/delete
        if value.lower() == constants.VALUE_DEFAULT:
          try:
            del disk.params[key]
          except KeyError:
            pass
        else:
          disk.params[key] = value
        changes.append(("disk.params:%s/%d" % (key, idx), value))

    return changes

  def _RemoveDisk(self, idx, root, _):
    """Removes a disk.

    """
    hotmsg = ""
    if self.op.hotplug:
      hotmsg = self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
                                   constants.HOTPLUG_TARGET_DISK,
                                   root, None, idx)
      ShutdownInstanceDisks(self, self.instance, [root])

    (anno_disk,) = AnnotateDiskParams(self.instance, [root], self.cfg)
    for node_uuid, disk in anno_disk.ComputeNodeTree(
                             self.instance.primary_node):
      msg = self.rpc.call_blockdev_remove(node_uuid, (disk, self.instance)) \
              .fail_msg
      if msg:
        self.LogWarning("Could not remove disk/%d on node '%s': %s,"
                        " continuing anyway", idx,
                        self.cfg.GetNodeName(node_uuid), msg)

    # if this is a DRBD disk, return its port to the pool
    if root.dev_type in constants.DTS_DRBD:
      self.cfg.AddTcpUdpPort(root.logical_id[2])

    return hotmsg

  def _CreateNewNic(self, idx, params, private):
    """Creates data structure for a new network interface.

    """
    mac = params[constants.INIC_MAC]
    ip = params.get(constants.INIC_IP, None)
    net = params.get(constants.INIC_NETWORK, None)
    name = params.get(constants.INIC_NAME, None)
    net_uuid = self.cfg.LookupNetwork(net)
    #TODO: not private.filled?? can a nic have no nicparams??
    nicparams = private.filled
    nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name,
                       nicparams=nicparams)
    nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())

    changes = [
      ("nic.%d" % idx,
       "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
       (mac, ip, private.filled[constants.NIC_MODE],
       private.filled[constants.NIC_LINK], net)),
      ]

    if self.op.hotplug:
      msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
                                constants.HOTPLUG_TARGET_NIC,
                                nobj, None, idx)
      changes.append(("nic.%d" % idx, msg))

    return (nobj, changes)

  def _ApplyNicMods(self, idx, nic, params, private):
    """Modifies a network interface.

    """
    changes = []

    for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:
      if key in params:
        changes.append(("nic.%s/%d" % (key, idx), params[key]))
        setattr(nic, key, params[key])

    new_net = params.get(constants.INIC_NETWORK, nic.network)
    new_net_uuid = self.cfg.LookupNetwork(new_net)
    if new_net_uuid != nic.network:
      changes.append(("nic.network/%d" % idx, new_net))
      nic.network = new_net_uuid

    if private.filled:
      nic.nicparams = private.filled

      for (key, val) in nic.nicparams.items():
        changes.append(("nic.%s/%d" % (key, idx), val))

    if self.op.hotplug:
      msg = self._HotplugDevice(constants.HOTPLUG_ACTION_MODIFY,
                                constants.HOTPLUG_TARGET_NIC,
                                nic, None, idx)
      changes.append(("nic/%d" % idx, msg))

    return changes

  def _RemoveNic(self, idx, nic, _):
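    """Hot-unplug a NIC that is being removed, if hotplugging was requested.

    """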
    if self.op.hotplug:
      return self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
                                 constants.HOTPLUG_TARGET_NIC,
                                 nic, None, idx)

  def Exec(self, feedback_fn):
    """Modifies an instance.

    All parameters take effect only at the next restart of the instance.

    """
    # Process here the warnings from CheckPrereq, as we don't have a
    # feedback_fn there.
    # TODO: Replace with self.LogWarning
    for warn in self.warn:
      feedback_fn("WARNING: %s" % warn)

    assert ((self.op.disk_template is None) ^
            bool(self.owned_locks(locking.LEVEL_NODE_RES))), \
      "Not owning any node resource locks"

    result = []

    # New primary node
    if self.op.pnode_uuid:
      self.instance.primary_node = self.op.pnode_uuid

    # runtime memory
    if self.op.runtime_mem:
      rpcres = self.rpc.call_instance_balloon_memory(self.instance.primary_node,
                                                     self.instance,
                                                     self.op.runtime_mem)
      rpcres.Raise("Cannot modify instance runtime memory")
      result.append(("runtime_memory", self.op.runtime_mem))

    # Apply disk changes
    _ApplyContainerMods("disk", self.instance.disks, result, self.diskmod,
                        self._CreateNewDisk, self._ModifyDisk,
                        self._RemoveDisk, post_add_fn=self._PostAddDisk)
    _UpdateIvNames(0, self.instance.disks)

    if self.op.disk_template:
      if __debug__:
        check_nodes = set(self.instance.all_nodes)
        if self.op.remote_node_uuid:
          check_nodes.add(self.op.remote_node_uuid)
        for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
          owned = self.owned_locks(level)
          assert not (check_nodes - owned), \
            ("Not owning the correct locks, owning %r, expected at least %r" %
             (owned, check_nodes))

      r_shut = ShutdownInstanceDisks(self, self.instance)
3673
      if not r_shut:
3674
        raise errors.OpExecError("Cannot shutdown instance disks, unable to"
3675
                                 " proceed with disk template conversion")
3676
      mode = (self.instance.disk_template, self.op.disk_template)
3677
      try:
3678
        self._DISK_CONVERSIONS[mode](self, feedback_fn)
3679
      except:
3680
        self.cfg.ReleaseDRBDMinors(self.instance.uuid)
3681
        raise
3682
      result.append(("disk_template", self.op.disk_template))
3683

    
3684
      assert self.instance.disk_template == self.op.disk_template, \
3685
        ("Expected disk template '%s', found '%s'" %
3686
         (self.op.disk_template, self.instance.disk_template))
3687

    
3688
    # Release node and resource locks if there are any (they might already have
3689
    # been released during disk conversion)
3690
    ReleaseLocks(self, locking.LEVEL_NODE)
3691
    ReleaseLocks(self, locking.LEVEL_NODE_RES)
3692

    
3693
    # Apply NIC changes
3694
    if self._new_nics is not None:
3695
      self.instance.nics = self._new_nics
3696
      result.extend(self._nic_chgdesc)
3697

    
3698
    # hvparams changes
3699
    if self.op.hvparams:
3700
      self.instance.hvparams = self.hv_inst
3701
      for key, val in self.op.hvparams.iteritems():
3702
        result.append(("hv/%s" % key, val))
3703

    
3704
    # beparams changes
3705
    if self.op.beparams:
3706
      self.instance.beparams = self.be_inst
3707
      for key, val in self.op.beparams.iteritems():
3708
        result.append(("be/%s" % key, val))
3709

    
3710
    # OS change
3711
    if self.op.os_name:
3712
      self.instance.os = self.op.os_name
3713

    
3714
    # osparams changes
3715
    if self.op.osparams:
3716
      self.instance.osparams = self.os_inst
3717
      for key, val in self.op.osparams.iteritems():
3718
        result.append(("os/%s" % key, val))
3719

    
3720
    if self.op.osparams_private:
3721
      self.instance.osparams_private = self.os_inst_private
3722
      for key, val in self.op.osparams_private.iteritems():
3723
        # Show the Private(...) blurb.
3724
        result.append(("os_private/%s" % key, repr(val)))
3725

    
3726
    if self.op.offline is None:
3727
      # Ignore
3728
      pass
3729
    elif self.op.offline:
3730
      # Mark instance as offline
3731
      self.cfg.MarkInstanceOffline(self.instance.uuid)
3732
      result.append(("admin_state", constants.ADMINST_OFFLINE))
3733
    else:
3734
      # Mark instance as online, but stopped
3735
      self.cfg.MarkInstanceDown(self.instance.uuid)
3736
      result.append(("admin_state", constants.ADMINST_DOWN))
3737

    
3738
    self.cfg.Update(self.instance, feedback_fn, self.proc.GetECId())
3739

    
3740
    assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
3741
                self.owned_locks(locking.LEVEL_NODE)), \
3742
      "All node locks should have been released by now"
3743

    
3744
    return result
3745

    
3746
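  # Supported disk template conversions, keyed by (current template,
  # requested template); Exec() above looks the pair up here, and any other
  # combination is assumed to have been rejected before this point.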
  _DISK_CONVERSIONS = {
    (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
    (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
    }


class LUInstanceChangeGroup(LogicalUnit):
  HPATH = "instance-change-group"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self.share_locks = ShareAll()

    self.needed_locks = {
      locking.LEVEL_NODEGROUP: [],
      locking.LEVEL_NODE: [],
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
      }

    self._ExpandAndLockInstance()

    if self.op.target_groups:
      self.req_target_uuids = map(self.cfg.LookupNodeGroup,
                                  self.op.target_groups)
    else:
      self.req_target_uuids = None

    self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]

      if self.req_target_uuids:
        lock_groups = set(self.req_target_uuids)

        # Lock all groups used by instance optimistically; this requires going
        # via the node before it's locked, requiring verification later on
        instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
        lock_groups.update(instance_groups)
      else:
        # No target groups, need to lock all of them
        lock_groups = locking.ALL_SET

      self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups

    elif level == locking.LEVEL_NODE:
      if self.req_target_uuids:
        # Lock all nodes used by instances
        self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
        self._LockInstancesNodes()

        # Lock all nodes in all potential target groups
        lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
                       self.cfg.GetInstanceNodeGroups(self.op.instance_uuid))
        member_nodes = [node_uuid
                        for group in lock_groups
                        for node_uuid in self.cfg.GetNodeGroup(group).members]
        self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
      else:
        # Lock all nodes as all groups are potential targets
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

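  # The group locks above are acquired optimistically (the instance's groups
  # are looked up before its nodes are locked), so CheckPrereq re-verifies
  # them via CheckInstanceNodeGroups once all locks are actually held.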
  def CheckPrereq(self):
    owned_instance_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))

    assert (self.req_target_uuids is None or
            owned_groups.issuperset(self.req_target_uuids))
    assert owned_instance_names == set([self.op.instance_name])

    # Get instance information
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)

    # Check if node groups for locked instance are still correct
    assert owned_nodes.issuperset(self.instance.all_nodes), \
      ("Instance %s's nodes changed while we kept the lock" %
       self.op.instance_name)

    inst_groups = CheckInstanceNodeGroups(self.cfg, self.op.instance_uuid,
                                          owned_groups)

    if self.req_target_uuids:
      # User requested specific target groups
      self.target_uuids = frozenset(self.req_target_uuids)
    else:
      # All groups except those used by the instance are potential targets
      self.target_uuids = owned_groups - inst_groups

    conflicting_groups = self.target_uuids & inst_groups
    if conflicting_groups:
      raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
                                 " used by the instance '%s'" %
                                 (utils.CommaJoin(conflicting_groups),
                                  self.op.instance_name),
                                 errors.ECODE_INVAL)

    if not self.target_uuids:
      raise errors.OpPrereqError("There are no possible target groups",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    assert self.target_uuids

    env = {
      "TARGET_GROUPS": " ".join(self.target_uuids),
      }

    env.update(BuildInstanceHookEnvByObject(self, self.instance))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()
    return ([mn], [mn])

  def Exec(self, feedback_fn):
    instances = list(self.owned_locks(locking.LEVEL_INSTANCE))

    assert instances == [self.op.instance_name], "Instance not locked"

    req = iallocator.IAReqGroupChange(instances=instances,
                                      target_groups=list(self.target_uuids))
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute solution for changing group of"
                                 " instance '%s' using iallocator '%s': %s" %
                                 (self.op.instance_name, self.op.iallocator,
                                  ial.info), errors.ECODE_NORES)

    jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False)

    self.LogInfo("Iallocator returned %s job(s) for changing group of"
                 " instance '%s'", len(jobs), self.op.instance_name)

    return ResultWithJobs(jobs)
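    # Usage sketch (assumption; the opcode is defined elsewhere in the code
    # base, not in this module): clients typically reach this LU via
    #   opcodes.OpInstanceChangeGroup(instance_name="inst1.example.com",
    #                                 target_groups=["group2"])
    # and the ResultWithJobs above causes the iallocator-computed evacuation
    # jobs to be submitted as follow-up jobs.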