#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Logical units dealing with instances."""

import OpenSSL
import copy
import logging
import os

from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import ht
from ganeti import hypervisor
from ganeti import locking
from ganeti.masterd import iallocator
from ganeti import masterd
from ganeti import netutils
from ganeti import objects
from ganeti import pathutils
from ganeti import serializer
import ganeti.rpc.node as rpc
from ganeti import utils

from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs

from ganeti.cmdlib.common import INSTANCE_DOWN, \
  INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, CheckNodeOnline, \
  ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
  LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
  IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
  AnnotateDiskParams, GetUpdatedParams, ExpandInstanceUuidAndName, \
  ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeUuidAndName, \
  CheckDiskTemplateEnabled, IsValidDiskAccessModeCombination
from ganeti.cmdlib.instance_storage import CreateDisks, \
  CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, WaitForSync, \
  IsExclusiveStorageEnabledNodeUuid, CreateSingleBlockDev, ComputeDisks, \
  CheckRADOSFreeSpace, ComputeDiskSizePerVG, GenerateDiskTemplate, \
  StartInstanceDisks, ShutdownInstanceDisks, AssembleInstanceDisks, \
  CheckSpindlesExclusiveStorage
from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
  GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
  NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
  ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
  GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
  CheckInstanceBridgesExist, CheckNicsBridgesExist, CheckNodeHasOS

import ganeti.masterd.instance


#: Type description for changes as returned by L{_ApplyContainerMods}'s
#: callbacks
_TApplyContModsCbChanges = \
  ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
    ht.TNonEmptyString,
    ht.TAny,
    ])))
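
# In plain terms (illustrative, not authoritative): a callback may return
# None, or a list of (name, value) 2-tuples describing what changed, e.g.
# [("mac", "aa:00:00:35:d2:7f"), ("ip", None)].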


def _CheckHostnameSane(lu, name):
  """Ensures that a given hostname resolves to a 'sane' name.

  The given name is required to be a prefix of the resolved hostname,
  to prevent accidental mismatches.

  @param lu: the logical unit on behalf of which we're checking
  @param name: the name we should resolve and check
  @return: the resolved hostname object

  """
  hostname = netutils.GetHostname(name=name)
  if hostname.name != name:
    lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name)
  if not utils.MatchNameComponent(name, [hostname.name]):
    raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
                                " same as given hostname '%s'") %
                               (hostname.name, name), errors.ECODE_INVAL)
  return hostname
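
# Illustrative behaviour, assuming typical DNS data: a request for "inst1"
# that resolves to "inst1.example.com" passes the prefix check above, while
# one that resolves to "mail.example.com" raises OpPrereqError.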


def _CheckOpportunisticLocking(op):
  """Generate error if opportunistic locking is not possible.

  """
  if op.opportunistic_locking and not op.iallocator:
    raise errors.OpPrereqError("Opportunistic locking is only available in"
                               " combination with an instance allocator",
                               errors.ECODE_INVAL)


def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_name_whitelist):
  """Wrapper around IAReqInstanceAlloc.

  @param op: The instance opcode
  @param disks: The computed disks
  @param nics: The computed nics
  @param beparams: The fully filled beparams
  @param node_name_whitelist: List of nodes which should appear as online to the
    allocator (unless the node is already marked offline)

  @returns: A filled L{iallocator.IAReqInstanceAlloc}

  """
  spindle_use = beparams[constants.BE_SPINDLE_USE]
  return iallocator.IAReqInstanceAlloc(name=op.instance_name,
                                       disk_template=op.disk_template,
                                       tags=op.tags,
                                       os=op.os_type,
                                       vcpus=beparams[constants.BE_VCPUS],
                                       memory=beparams[constants.BE_MAXMEM],
                                       spindle_use=spindle_use,
                                       disks=disks,
                                       nics=[n.ToDict() for n in nics],
                                       hypervisor=op.hypervisor,
                                       node_whitelist=node_name_whitelist)


def _ComputeFullBeParams(op, cluster):
  """Computes the full beparams.

  @param op: The instance opcode
  @param cluster: The cluster config object

  @return: The fully filled beparams

  """
  default_beparams = cluster.beparams[constants.PP_DEFAULT]
  for param, value in op.beparams.iteritems():
    if value == constants.VALUE_AUTO:
      op.beparams[param] = default_beparams[param]
  objects.UpgradeBeParams(op.beparams)
  utils.ForceDictType(op.beparams, constants.BES_PARAMETER_TYPES)
  return cluster.SimpleFillBE(op.beparams)
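
# Illustrative example (values are made up): with cluster defaults of
# {"maxmem": 1024}, an opcode carrying beparams={"maxmem": "auto"} has the
# value replaced by 1024 before the dict is upgraded, type-checked and
# filled with the remaining cluster defaults.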


def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
  """Computes the nics.

  @param op: The instance opcode
  @param cluster: Cluster configuration object
  @param default_ip: The default ip to assign
  @param cfg: An instance of the configuration object
  @param ec_id: Execution context ID

  @returns: The built up NICs

  """
  nics = []
  for nic in op.nics:
    nic_mode_req = nic.get(constants.INIC_MODE, None)
    nic_mode = nic_mode_req
    if nic_mode is None or nic_mode == constants.VALUE_AUTO:
      nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]

    net = nic.get(constants.INIC_NETWORK, None)
    link = nic.get(constants.NIC_LINK, None)
    ip = nic.get(constants.INIC_IP, None)
    vlan = nic.get(constants.INIC_VLAN, None)

    if net is None or net.lower() == constants.VALUE_NONE:
      net = None
    else:
      if nic_mode_req is not None or link is not None:
        raise errors.OpPrereqError("If network is given, no mode or link"
                                   " is allowed to be passed",
                                   errors.ECODE_INVAL)

    # ip validity checks
    if ip is None or ip.lower() == constants.VALUE_NONE:
      nic_ip = None
    elif ip.lower() == constants.VALUE_AUTO:
      if not op.name_check:
        raise errors.OpPrereqError("IP address set to auto but name checks"
                                   " have been skipped",
                                   errors.ECODE_INVAL)
      nic_ip = default_ip
    else:
      # We defer pool operations until later, so that the iallocator has
      # filled in the instance's node(s)
      if ip.lower() == constants.NIC_IP_POOL:
        if net is None:
          raise errors.OpPrereqError("if ip=pool, parameter network"
                                     " must be passed too",
                                     errors.ECODE_INVAL)

      elif not netutils.IPAddress.IsValid(ip):
        raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                   errors.ECODE_INVAL)

      nic_ip = ip

    # TODO: check the ip address for uniqueness
    if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
      raise errors.OpPrereqError("Routed nic mode requires an ip address",
                                 errors.ECODE_INVAL)

    # MAC address verification
    mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
    if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
      mac = utils.NormalizeAndValidateMac(mac)

      try:
        # TODO: We need to factor this out
        cfg.ReserveMAC(mac, ec_id)
      except errors.ReservationError:
        raise errors.OpPrereqError("MAC address %s already in use"
                                   " in cluster" % mac,
                                   errors.ECODE_NOTUNIQUE)

    #  Build nic parameters
    nicparams = {}
    if nic_mode_req:
      nicparams[constants.NIC_MODE] = nic_mode
    if link:
      nicparams[constants.NIC_LINK] = link
    if vlan:
      nicparams[constants.NIC_VLAN] = vlan

    check_params = cluster.SimpleFillNIC(nicparams)
    objects.NIC.CheckParameterSyntax(check_params)
    net_uuid = cfg.LookupNetwork(net)
    name = nic.get(constants.INIC_NAME, None)
    if name is not None and name.lower() == constants.VALUE_NONE:
      name = None
    nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name,
                          network=net_uuid, nicparams=nicparams)
    nic_obj.uuid = cfg.GenerateUniqueID(ec_id)
    nics.append(nic_obj)

  return nics
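
# An illustrative NIC specification as handled above (not authoritative):
# {constants.INIC_IP: "pool", constants.INIC_NETWORK: "mynet"} -- the mode
# falls back to the cluster default and the actual address is filled in
# later from the network's IP pool.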


def _CheckForConflictingIp(lu, ip, node_uuid):
  """Raise an error in case of a conflicting IP address.

  @type ip: string
  @param ip: IP address
  @type node_uuid: string
  @param node_uuid: node UUID

  """
  (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node_uuid)
  if conf_net is not None:
    raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
                                " network %s, but the target NIC does not." %
                                (ip, conf_net)),
                               errors.ECODE_STATE)

  return (None, None)


def _ComputeIPolicyInstanceSpecViolation(
  ipolicy, instance_spec, disk_template,
  _compute_fn=ComputeIPolicySpecViolation):
  """Compute whether the instance spec meets the specs of the ipolicy.

  @type ipolicy: dict
  @param ipolicy: The ipolicy to verify against
  @type instance_spec: dict
  @param instance_spec: The instance spec to verify
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{ComputeIPolicySpecViolation}

  """
  mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
  cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
  disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
  disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
  nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
  spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)

  return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
                     disk_sizes, spindle_use, disk_template)
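
# An illustrative instance_spec using the keys read above (values made up):
# {constants.ISPEC_MEM_SIZE: 512, constants.ISPEC_CPU_COUNT: 2,
#  constants.ISPEC_DISK_COUNT: 1, constants.ISPEC_DISK_SIZE: [10240],
#  constants.ISPEC_NIC_COUNT: 1, constants.ISPEC_SPINDLE_USE: 1}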


def _CheckOSVariant(os_obj, name):
  """Check whether an OS name conforms to the os variants specification.

  @type os_obj: L{objects.OS}
  @param os_obj: OS object to check
  @type name: string
  @param name: OS name passed by the user, to check for validity

  """
  variant = objects.OS.GetVariant(name)
  if not os_obj.supported_variants:
    if variant:
      raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
                                 " passed)" % (os_obj.name, variant),
                                 errors.ECODE_INVAL)
    return
  if not variant:
    raise errors.OpPrereqError("OS name must include a variant",
                               errors.ECODE_INVAL)

  if variant not in os_obj.supported_variants:
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)
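
# Example, assuming the usual "<os>+<variant>" naming (e.g.
# "debootstrap+default"): for an OS that declares supported variants, the
# name must carry a variant and that variant must be in the supported list.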


def _ComputeInstanceCommunicationNIC(instance_name):
  """Compute the name of the instance NIC used by instance
  communication.

  With instance communication, a new NIC is added to the instance.
  This NIC has a special name that identifies it as being part of
  instance communication, and not just a normal NIC.  This function
  generates the name of the NIC based on a prefix and the instance
  name.

  @type instance_name: string
  @param instance_name: name of the instance the NIC belongs to

  @rtype: string
  @return: name of the NIC

  """
  return constants.INSTANCE_COMMUNICATION_NIC_PREFIX + instance_name
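
# Illustrative example: for an instance named "inst1" this returns
# "<prefix>inst1", where <prefix> stands for the value of
# constants.INSTANCE_COMMUNICATION_NIC_PREFIX (not repeated here).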


class LUInstanceCreate(LogicalUnit):
  """Create an instance.

  """
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def _CheckDiskTemplateValid(self):
    """Checks validity of disk template.

    """
    cluster = self.cfg.GetClusterInfo()
    if self.op.disk_template is None:
      # FIXME: It would be better to take the default disk template from the
      # ipolicy, but for the ipolicy we need the primary node, which we get from
      # the iallocator, which wants the disk template as input. To solve this
      # chicken-and-egg problem, it should be possible to specify just a node
      # group from the iallocator and take the ipolicy from that.
      self.op.disk_template = cluster.enabled_disk_templates[0]
    CheckDiskTemplateEnabled(cluster, self.op.disk_template)

  def _CheckDiskArguments(self):
    """Checks validity of disk-related arguments.

    """
    # check that disks' names are unique and valid
    utils.ValidateDeviceNames("disk", self.op.disks)

    self._CheckDiskTemplateValid()

    # check disks' parameter names and consistent adopt/no-adopt strategy
    has_adopt = has_no_adopt = False
    for disk in self.op.disks:
      if self.op.disk_template != constants.DT_EXT:
        utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
      if constants.IDISK_ADOPT in disk:
        has_adopt = True
      else:
        has_no_adopt = True
    if has_adopt and has_no_adopt:
      raise errors.OpPrereqError("Either all disks are adopted or none is",
                                 errors.ECODE_INVAL)
    if has_adopt:
      if self.op.disk_template not in constants.DTS_MAY_ADOPT:
        raise errors.OpPrereqError("Disk adoption is not supported for the"
                                   " '%s' disk template" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)
      if self.op.iallocator is not None:
        raise errors.OpPrereqError("Disk adoption not allowed with an"
                                   " iallocator script", errors.ECODE_INVAL)
      if self.op.mode == constants.INSTANCE_IMPORT:
        raise errors.OpPrereqError("Disk adoption not allowed for"
                                   " instance import", errors.ECODE_INVAL)
    else:
      if self.op.disk_template in constants.DTS_MUST_ADOPT:
        raise errors.OpPrereqError("Disk template %s requires disk adoption,"
                                   " but no 'adopt' parameter given" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)

    self.adopt_disks = has_adopt
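
  # An illustrative adoption request (not authoritative): with the plain
  # (LVM) disk template, a disk spec such as {constants.IDISK_VG: "xenvg",
  # constants.IDISK_ADOPT: "existing-lv"} reuses an existing logical volume;
  # its size is read back from the node in CheckPrereq rather than created.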

  def _CheckVLANArguments(self):
    """Check validity of VLANs, if given.

    Accepted forms: a plain number ("100") is normalized to ".100" (untagged
    access port), ".100" means untagged VLAN 100 optionally followed by a
    trunk (".100:200"), and ":100:200" means a trunk with tagged VLANs only.

    """
    for nic in self.op.nics:
      vlan = nic.get(constants.INIC_VLAN, None)
      if vlan:
        if vlan[0] == ".":
          # vlan starting with dot means single untagged vlan,
          # might be followed by trunk (:)
          if not vlan[1:].isdigit():
            vlanlist = vlan[1:].split(':')
            for vl in vlanlist:
              if not vl.isdigit():
                raise errors.OpPrereqError("Specified VLAN parameter is "
                                           "invalid : %s" % vlan,
                                           errors.ECODE_INVAL)
        elif vlan[0] == ":":
          # Trunk - tagged only
          vlanlist = vlan[1:].split(':')
          for vl in vlanlist:
            if not vl.isdigit():
              raise errors.OpPrereqError("Specified VLAN parameter is invalid"
                                         " : %s" % vlan, errors.ECODE_INVAL)
        elif vlan.isdigit():
          # This is the simplest case. No dots, just digits
          # -> Create untagged access port, dot needs to be added
          nic[constants.INIC_VLAN] = "." + vlan
        else:
          raise errors.OpPrereqError("Specified VLAN parameter is invalid"
                                     " : %s" % vlan, errors.ECODE_INVAL)

  def CheckArguments(self):
    """Check arguments.

    """
    # do not require name_check to ease forward/backward compatibility
    # for tools
    if self.op.no_install and self.op.start:
      self.LogInfo("No-installation mode selected, disabling startup")
      self.op.start = False
    # validate/normalize the instance name
    self.op.instance_name = \
      netutils.Hostname.GetNormalizedName(self.op.instance_name)

    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("Cannot do IP address check without a name"
                                 " check", errors.ECODE_INVAL)

    # add NIC for instance communication
    if self.op.instance_communication:
      nic_name = _ComputeInstanceCommunicationNIC(self.op.instance_name)

      self.op.nics.append({constants.INIC_NAME: nic_name,
                           constants.INIC_MAC: constants.VALUE_GENERATE,
                           constants.INIC_IP: constants.NIC_IP_POOL,
                           constants.INIC_NETWORK:
                             self.cfg.GetInstanceCommunicationNetwork()})

    # check nics' parameter names
    for nic in self.op.nics:
      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
    # check that NICs' parameter names are unique and valid
    utils.ValidateDeviceNames("NIC", self.op.nics)

    self._CheckVLANArguments()

    self._CheckDiskArguments()
    assert self.op.disk_template is not None

    # instance name verification
    if self.op.name_check:
      self.hostname = _CheckHostnameSane(self, self.op.instance_name)
      self.op.instance_name = self.hostname.name
      # used in CheckPrereq for ip ping check
      self.check_ip = self.hostname.ip
    else:
      self.check_ip = None

    # file storage checks
    if (self.op.file_driver and
        self.op.file_driver not in constants.FILE_DRIVER):
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                 self.op.file_driver, errors.ECODE_INVAL)

    # set default file_driver if unset and required
    if (not self.op.file_driver and
        self.op.disk_template in constants.DTS_FILEBASED):
      self.op.file_driver = constants.FD_LOOP

    ### Node/iallocator related checks
    CheckIAllocatorOrNode(self, "iallocator", "pnode")

    if self.op.pnode is not None:
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        if self.op.snode is None:
          raise errors.OpPrereqError("The networked disk templates need"
                                     " a mirror node", errors.ECODE_INVAL)
      elif self.op.snode:
        self.LogWarning("Secondary node will be ignored on non-mirrored disk"
                        " template")
        self.op.snode = None

    _CheckOpportunisticLocking(self.op)

    if self.op.mode == constants.INSTANCE_IMPORT:
      # On import force_variant must be True, because if we forced it at
      # initial install, our only chance when importing it back is that it
      # works again!
      self.op.force_variant = True

      if self.op.no_install:
        self.LogInfo("No-installation mode has no effect during import")

    elif self.op.mode == constants.INSTANCE_CREATE:
      if self.op.os_type is None:
        raise errors.OpPrereqError("No guest OS specified",
                                   errors.ECODE_INVAL)
      if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
        raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
                                   " installation" % self.op.os_type,
                                   errors.ECODE_STATE)
    elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
      self._cds = GetClusterDomainSecret()

      # Check handshake to ensure both clusters have the same domain secret
      src_handshake = self.op.source_handshake
      if not src_handshake:
        raise errors.OpPrereqError("Missing source handshake",
                                   errors.ECODE_INVAL)

      errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
                                                           src_handshake)
      if errmsg:
        raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
                                   errors.ECODE_INVAL)

      # Load and check source CA
      self.source_x509_ca_pem = self.op.source_x509_ca
      if not self.source_x509_ca_pem:
        raise errors.OpPrereqError("Missing source X509 CA",
                                   errors.ECODE_INVAL)

      try:
        (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
                                                    self._cds)
      except OpenSSL.crypto.Error, err:
        raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
                                   (err, ), errors.ECODE_INVAL)

      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
      if errcode is not None:
        raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
                                   errors.ECODE_INVAL)

      self.source_x509_ca = cert

      src_instance_name = self.op.source_instance_name
      if not src_instance_name:
        raise errors.OpPrereqError("Missing source instance name",
                                   errors.ECODE_INVAL)

      self.source_instance_name = \
        netutils.GetHostname(name=src_instance_name).name

    else:
      raise errors.OpPrereqError("Invalid instance creation mode %r" %
                                 self.op.mode, errors.ECODE_INVAL)

  def ExpandNames(self):
    """ExpandNames for CreateInstance.

    Figure out the right locks for instance creation.

    """
    self.needed_locks = {}

    # this is just a preventive check, but someone might still add this
    # instance in the meantime, and creation will fail at lock-add time
    if self.op.instance_name in \
      [inst.name for inst in self.cfg.GetAllInstancesInfo().values()]:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 self.op.instance_name, errors.ECODE_EXISTS)

    self.add_locks[locking.LEVEL_INSTANCE] = self.op.instance_name

    if self.op.iallocator:
      # TODO: Find a solution to not lock all nodes in the cluster, e.g. by
      # specifying a group on instance creation and then selecting nodes from
      # that group
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
      self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET

      if self.op.opportunistic_locking:
        self.opportunistic_locks[locking.LEVEL_NODE] = True
    else:
      (self.op.pnode_uuid, self.op.pnode) = \
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
      nodelist = [self.op.pnode_uuid]
      if self.op.snode is not None:
        (self.op.snode_uuid, self.op.snode) = \
          ExpandNodeUuidAndName(self.cfg, self.op.snode_uuid, self.op.snode)
        nodelist.append(self.op.snode_uuid)
      self.needed_locks[locking.LEVEL_NODE] = nodelist

    # in case of import lock the source node too
    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = self.op.src_node
      src_path = self.op.src_path

      if src_path is None:
        self.op.src_path = src_path = self.op.instance_name

      if src_node is None:
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
        self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
        self.op.src_node = None
        if os.path.isabs(src_path):
          raise errors.OpPrereqError("Importing an instance from a path"
                                     " requires a source node option",
                                     errors.ECODE_INVAL)
      else:
        (self.op.src_node_uuid, self.op.src_node) = (_, src_node) = \
          ExpandNodeUuidAndName(self.cfg, self.op.src_node_uuid, src_node)
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
          self.needed_locks[locking.LEVEL_NODE].append(self.op.src_node_uuid)
        if not os.path.isabs(src_path):
          self.op.src_path = \
            utils.PathJoin(pathutils.EXPORT_DIR, src_path)

    self.needed_locks[locking.LEVEL_NODE_RES] = \
      CopyLockList(self.needed_locks[locking.LEVEL_NODE])

    # Optimistically acquire shared group locks (we're reading the
    # configuration).  We can't just call GetInstanceNodeGroups, because the
    # instance doesn't exist yet. Therefore we lock all node groups of all
    # nodes we have.
    if self.needed_locks[locking.LEVEL_NODE] == locking.ALL_SET:
      # In the case we lock all nodes for opportunistic allocation, we have no
      # choice but to lock all groups, because they're allocated before nodes.
      # This is sad, but true. At least we release all those we don't need in
      # CheckPrereq later.
      self.needed_locks[locking.LEVEL_NODEGROUP] = locking.ALL_SET
    else:
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
        list(self.cfg.GetNodeGroupsFromNodes(
          self.needed_locks[locking.LEVEL_NODE]))
    self.share_locks[locking.LEVEL_NODEGROUP] = 1

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE_RES and \
      self.opportunistic_locks[locking.LEVEL_NODE]:
      # Even when using opportunistic locking, we require the same set of
      # NODE_RES locks as we got NODE locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        self.owned_locks(locking.LEVEL_NODE)

  def _RunAllocator(self):
    """Run the allocator based on input opcode.

    """
    if self.op.opportunistic_locking:
      # Only consider nodes for which a lock is held
      node_name_whitelist = self.cfg.GetNodeNames(
        self.owned_locks(locking.LEVEL_NODE))
    else:
      node_name_whitelist = None

    req = _CreateInstanceAllocRequest(self.op, self.disks,
                                      self.nics, self.be_full,
                                      node_name_whitelist)
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      # When opportunistic locks are used only a temporary failure is generated
      if self.op.opportunistic_locking:
        ecode = errors.ECODE_TEMP_NORES
      else:
        ecode = errors.ECODE_NORES

      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" %
                                 (self.op.iallocator, ial.info),
                                 ecode)

    (self.op.pnode_uuid, self.op.pnode) = \
      ExpandNodeUuidAndName(self.cfg, None, ial.result[0])
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                 self.op.instance_name, self.op.iallocator,
                 utils.CommaJoin(ial.result))

    assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator"

    if req.RequiredNodes() == 2:
      (self.op.snode_uuid, self.op.snode) = \
        ExpandNodeUuidAndName(self.cfg, None, ial.result[1])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "ADD_MODE": self.op.mode,
      }
    if self.op.mode == constants.INSTANCE_IMPORT:
      env["SRC_NODE"] = self.op.src_node
      env["SRC_PATH"] = self.op.src_path
      env["SRC_IMAGES"] = self.src_images

    env.update(BuildInstanceHookEnv(
      name=self.op.instance_name,
      primary_node_name=self.op.pnode,
      secondary_node_names=self.cfg.GetNodeNames(self.secondaries),
      status=self.op.start,
      os_type=self.op.os_type,
      minmem=self.be_full[constants.BE_MINMEM],
      maxmem=self.be_full[constants.BE_MAXMEM],
      vcpus=self.be_full[constants.BE_VCPUS],
      nics=NICListToTuple(self, self.nics),
      disk_template=self.op.disk_template,
      disks=[(d[constants.IDISK_NAME], d.get("uuid", ""),
              d[constants.IDISK_SIZE], d[constants.IDISK_MODE])
             for d in self.disks],
      bep=self.be_full,
      hvp=self.hv_full,
      hypervisor_name=self.op.hypervisor,
      tags=self.op.tags,
      ))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode(), self.op.pnode_uuid] + self.secondaries
    return nl, nl

  def _ReadExportInfo(self):
    """Reads the export information from disk.

    It will override the opcode source node and path with the actual
    information, if these two were not specified before.

    @return: the export information

    """
    assert self.op.mode == constants.INSTANCE_IMPORT

    if self.op.src_node_uuid is None:
      locked_nodes = self.owned_locks(locking.LEVEL_NODE)
      exp_list = self.rpc.call_export_list(locked_nodes)
      found = False
      for node_uuid in exp_list:
        if exp_list[node_uuid].fail_msg:
          continue
        if self.op.src_path in exp_list[node_uuid].payload:
          found = True
          self.op.src_node = self.cfg.GetNodeInfo(node_uuid).name
          self.op.src_node_uuid = node_uuid
          self.op.src_path = utils.PathJoin(pathutils.EXPORT_DIR,
                                            self.op.src_path)
          break
      if not found:
        raise errors.OpPrereqError("No export found for relative path %s" %
                                   self.op.src_path, errors.ECODE_INVAL)

    CheckNodeOnline(self, self.op.src_node_uuid)
    result = self.rpc.call_export_info(self.op.src_node_uuid, self.op.src_path)
    result.Raise("No export or invalid export found in dir %s" %
                 self.op.src_path)

    export_info = objects.SerializableConfigParser.Loads(str(result.payload))
    if not export_info.has_section(constants.INISECT_EXP):
      raise errors.ProgrammerError("Corrupted export config",
                                   errors.ECODE_ENVIRON)

    ei_version = export_info.get(constants.INISECT_EXP, "version")
    if int(ei_version) != constants.EXPORT_VERSION:
      raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                 (ei_version, constants.EXPORT_VERSION),
                                 errors.ECODE_ENVIRON)
    return export_info

  def _ReadExportParams(self, einfo):
    """Use export parameters as defaults.

    In case the opcode doesn't specify (i.e. override) some instance
    parameters, try to use them from the export information, if it
    declares them.

    """
    self.op.os_type = einfo.get(constants.INISECT_EXP, "os")

    if not self.op.disks:
      disks = []
      # TODO: import the disk iv_name too
      for idx in range(constants.MAX_DISKS):
        if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
          disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
          disks.append({constants.IDISK_SIZE: disk_sz})
      self.op.disks = disks
      if not disks and self.op.disk_template != constants.DT_DISKLESS:
        raise errors.OpPrereqError("No disk info specified and the export"
                                   " is missing the disk information",
                                   errors.ECODE_INVAL)

    if not self.op.nics:
      nics = []
      for idx in range(constants.MAX_NICS):
        if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
          ndict = {}
          for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
            nic_param_name = "nic%d_%s" % (idx, name)
            if einfo.has_option(constants.INISECT_INS, nic_param_name):
              v = einfo.get(constants.INISECT_INS, nic_param_name)
              ndict[name] = v
          nics.append(ndict)
        else:
          break
      self.op.nics = nics

    if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
      self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()

    if (self.op.hypervisor is None and
        einfo.has_option(constants.INISECT_INS, "hypervisor")):
      self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")

    if einfo.has_section(constants.INISECT_HYP):
      # use the export parameters but do not override the ones
      # specified by the user
      for name, value in einfo.items(constants.INISECT_HYP):
        if name not in self.op.hvparams:
          self.op.hvparams[name] = value

    if einfo.has_section(constants.INISECT_BEP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_BEP):
        if name not in self.op.beparams:
          self.op.beparams[name] = value
        # Compatibility for the old "memory" be param
        if name == constants.BE_MEMORY:
          if constants.BE_MAXMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MAXMEM] = value
          if constants.BE_MINMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MINMEM] = value
    else:
      # try to read the parameters old style, from the main section
      for name in constants.BES_PARAMETERS:
        if (name not in self.op.beparams and
            einfo.has_option(constants.INISECT_INS, name)):
          self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)

    if einfo.has_section(constants.INISECT_OSP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_OSP):
        if name not in self.op.osparams:
          self.op.osparams[name] = value

    if einfo.has_section(constants.INISECT_OSP_PRIVATE):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_OSP_PRIVATE):
        if name not in self.op.osparams_private:
          self.op.osparams_private[name] = serializer.Private(value, descr=name)

  def _RevertToDefaults(self, cluster):
    """Revert the instance parameters to the default values.

    """
    # hvparams
    hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
    for name in self.op.hvparams.keys():
      if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
        del self.op.hvparams[name]
    # beparams
    be_defs = cluster.SimpleFillBE({})
    for name in self.op.beparams.keys():
      if name in be_defs and be_defs[name] == self.op.beparams[name]:
        del self.op.beparams[name]
    # nic params
    nic_defs = cluster.SimpleFillNIC({})
    for nic in self.op.nics:
      for name in constants.NICS_PARAMETERS:
        if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
          del nic[name]
    # osparams
    os_defs = cluster.SimpleFillOS(self.op.os_type, {})
    for name in self.op.osparams.keys():
      if name in os_defs and os_defs[name] == self.op.osparams[name]:
        del self.op.osparams[name]

    os_defs_ = cluster.SimpleFillOS(self.op.os_type, {},
                                    os_params_private={})
    for name in self.op.osparams_private.keys():
      if name in os_defs_ and os_defs_[name] == self.op.osparams_private[name]:
        del self.op.osparams_private[name]

  def _CalculateFileStorageDir(self):
    """Calculate final instance file storage dir.

    """
    # file storage dir calculation/check
    self.instance_file_storage_dir = None
    if self.op.disk_template in constants.DTS_FILEBASED:
      # build the full file storage dir path
      joinargs = []

      cfg_storage = None
      if self.op.disk_template == constants.DT_FILE:
        cfg_storage = self.cfg.GetFileStorageDir()
      elif self.op.disk_template == constants.DT_SHARED_FILE:
        cfg_storage = self.cfg.GetSharedFileStorageDir()
      elif self.op.disk_template == constants.DT_GLUSTER:
        cfg_storage = self.cfg.GetGlusterStorageDir()

      if not cfg_storage:
        raise errors.OpPrereqError(
          "Cluster file storage dir for {tpl} storage type not defined".format(
            tpl=repr(self.op.disk_template)
          ),
          errors.ECODE_STATE
        )

      joinargs.append(cfg_storage)

      if self.op.file_storage_dir is not None:
        joinargs.append(self.op.file_storage_dir)

      if self.op.disk_template != constants.DT_GLUSTER:
        joinargs.append(self.op.instance_name)

      if len(joinargs) > 1:
        # pylint: disable=W0142
        self.instance_file_storage_dir = utils.PathJoin(*joinargs)
      else:
        self.instance_file_storage_dir = joinargs[0]
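
  # Illustrative result (paths are made up): for a DT_FILE instance "inst1"
  # with a cluster file storage dir of "/srv/ganeti/file-storage", the
  # computed directory is "/srv/ganeti/file-storage/inst1"; Gluster skips
  # the per-instance component, as seen above.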

  def CheckPrereq(self): # pylint: disable=R0914
    """Check prerequisites.

    """
    # Check that the optimistically acquired groups are correct wrt the
    # acquired nodes
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
    cur_groups = list(self.cfg.GetNodeGroupsFromNodes(owned_nodes))
    if not owned_groups.issuperset(cur_groups):
      raise errors.OpPrereqError("New instance %s's node groups changed since"
                                 " locks were acquired, current groups are"
                                 " '%s', owning groups '%s'; retry the"
                                 " operation" %
                                 (self.op.instance_name,
                                  utils.CommaJoin(cur_groups),
                                  utils.CommaJoin(owned_groups)),
                                 errors.ECODE_STATE)

    self._CalculateFileStorageDir()

    if self.op.mode == constants.INSTANCE_IMPORT:
      export_info = self._ReadExportInfo()
      self._ReadExportParams(export_info)
      self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
    else:
      self._old_instance_name = None

    if (not self.cfg.GetVGName() and
        self.op.disk_template not in constants.DTS_NOT_LVM):
      raise errors.OpPrereqError("Cluster does not support lvm-based"
                                 " instances", errors.ECODE_STATE)

    if (self.op.hypervisor is None or
        self.op.hypervisor == constants.VALUE_AUTO):
      self.op.hypervisor = self.cfg.GetHypervisorType()

    cluster = self.cfg.GetClusterInfo()
    enabled_hvs = cluster.enabled_hypervisors
    if self.op.hypervisor not in enabled_hvs:
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
                                 " cluster (%s)" %
                                 (self.op.hypervisor, ",".join(enabled_hvs)),
                                 errors.ECODE_STATE)

    # Check tag validity
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)

    # check hypervisor parameter syntax (locally)
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
    filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
                                      self.op.hvparams)
    hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
    hv_type.CheckParameterSyntax(filled_hvp)
    self.hv_full = filled_hvp
    # check that we don't specify global parameters on an instance
    CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
                         "instance", "cluster")

    # fill and remember the beparams dict
    self.be_full = _ComputeFullBeParams(self.op, cluster)

    # build os parameters
    if self.op.osparams_private is None:
      self.op.osparams_private = serializer.PrivateDict()
    if self.op.osparams_secret is None:
      self.op.osparams_secret = serializer.PrivateDict()

    self.os_full = cluster.SimpleFillOS(
      self.op.os_type,
      self.op.osparams,
      os_params_private=self.op.osparams_private,
      os_params_secret=self.op.osparams_secret
    )

    # now that hvp/bep are in final format, let's reset to defaults,
    # if told to do so
    if self.op.identify_defaults:
      self._RevertToDefaults(cluster)

    # NIC buildup
    self.nics = _ComputeNics(self.op, cluster, self.check_ip, self.cfg,
                             self.proc.GetECId())

    # disk checks/pre-build
    default_vg = self.cfg.GetVGName()
    self.disks = ComputeDisks(self.op, default_vg)

    if self.op.mode == constants.INSTANCE_IMPORT:
      disk_images = []
      for idx in range(len(self.disks)):
        option = "disk%d_dump" % idx
        if export_info.has_option(constants.INISECT_INS, option):
          # FIXME: are the old os-es, disk sizes, etc. useful?
          export_name = export_info.get(constants.INISECT_INS, option)
          image = utils.PathJoin(self.op.src_path, export_name)
          disk_images.append(image)
        else:
          disk_images.append(False)

      self.src_images = disk_images

      if self.op.instance_name == self._old_instance_name:
        for idx, nic in enumerate(self.nics):
          if nic.mac == constants.VALUE_AUTO:
            nic_mac_ini = "nic%d_mac" % idx
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)

    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT

    # ip ping checks (we use the same ip that was resolved in ExpandNames)
    if self.op.ip_check:
      if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (self.check_ip, self.op.instance_name),
                                   errors.ECODE_NOTUNIQUE)

    #### mac address generation
    # By generating here the mac address both the allocator and the hooks get
    # the real final mac address rather than the 'auto' or 'generate' value.
    # There is a race condition between the generation and the instance object
    # creation, which means that we know the mac is valid now, but we're not
    # sure it will be when we actually add the instance. If things go bad
    # adding the instance will abort because of a duplicate mac, and the
    # creation job will fail.
    for nic in self.nics:
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId())

    #### allocator run

    if self.op.iallocator is not None:
      self._RunAllocator()

    # Release all unneeded node locks
    keep_locks = filter(None, [self.op.pnode_uuid, self.op.snode_uuid,
                               self.op.src_node_uuid])
    ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)
    # Release all unneeded group locks
    ReleaseLocks(self, locking.LEVEL_NODEGROUP,
                 keep=self.cfg.GetNodeGroupsFromNodes(keep_locks))

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES)), \
      "Node locks differ from node resource locks"

    #### node related checks

    # check primary node
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode_uuid)
    assert self.pnode is not None, \
      "Cannot retrieve locked node %s" % self.op.pnode_uuid
    if pnode.offline:
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if pnode.drained:
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if not pnode.vm_capable:
      raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
                                 " '%s'" % pnode.name, errors.ECODE_STATE)

    self.secondaries = []

    # Fill in any IPs from IP pools. This must happen here, because we need to
    # know the nic's primary node, as specified by the iallocator
    for idx, nic in enumerate(self.nics):
      net_uuid = nic.network
      if net_uuid is not None:
        nobj = self.cfg.GetNetwork(net_uuid)
        netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.uuid)
        if netparams is None:
          raise errors.OpPrereqError("No netparams found for network"
                                     " %s. Probably not connected to"
                                     " node %s's nodegroup" %
                                     (nobj.name, self.pnode.name),
                                     errors.ECODE_INVAL)
        self.LogInfo("NIC/%d inherits netparams %s" %
                     (idx, netparams.values()))
        nic.nicparams = dict(netparams)
        if nic.ip is not None:
          if nic.ip.lower() == constants.NIC_IP_POOL:
            try:
              nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
                                         " from the address pool" % idx,
                                         errors.ECODE_STATE)
            self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name)
          else:
            try:
              self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId(),
                                 check=self.op.conflicts_check)
            except errors.ReservationError:
              raise errors.OpPrereqError("IP address %s already in use"
                                         " or does not belong to network %s" %
                                         (nic.ip, nobj.name),
                                         errors.ECODE_NOTUNIQUE)

      # net is None, ip None or given
      elif self.op.conflicts_check:
        _CheckForConflictingIp(self, nic.ip, self.pnode.uuid)

    # mirror node verification
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.snode_uuid == pnode.uuid:
        raise errors.OpPrereqError("The secondary node cannot be the"
                                   " primary node", errors.ECODE_INVAL)
      CheckNodeOnline(self, self.op.snode_uuid)
      CheckNodeNotDrained(self, self.op.snode_uuid)
      CheckNodeVmCapable(self, self.op.snode_uuid)
      self.secondaries.append(self.op.snode_uuid)

      snode = self.cfg.GetNodeInfo(self.op.snode_uuid)
      if pnode.group != snode.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    nodes = [pnode]
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      nodes.append(snode)
    has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
    excl_stor = compat.any(map(has_es, nodes))
    if excl_stor and self.op.disk_template not in constants.DTS_EXCL_STORAGE:
      raise errors.OpPrereqError("Disk template %s not supported with"
                                 " exclusive storage" % self.op.disk_template,
                                 errors.ECODE_STATE)
    for disk in self.disks:
      CheckSpindlesExclusiveStorage(disk, excl_stor, True)

    node_uuids = [pnode.uuid] + self.secondaries

    if not self.adopt_disks:
      if self.op.disk_template == constants.DT_RBD:
        # _CheckRADOSFreeSpace() is just a placeholder.
        # Any function that checks prerequisites can be placed here.
        # Check if there is enough space on the RADOS cluster.
        CheckRADOSFreeSpace()
      elif self.op.disk_template == constants.DT_EXT:
        # FIXME: Function that checks prereqs if needed
        pass
      elif self.op.disk_template in constants.DTS_LVM:
        # Check lv size requirements, if not adopting
        req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
        CheckNodesFreeDiskPerVG(self, node_uuids, req_sizes)
      else:
        # FIXME: add checks for other, non-adopting, non-lvm disk templates
        pass

    elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
      all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
                                disk[constants.IDISK_ADOPT])
                     for disk in self.disks])
      if len(all_lvs) != len(self.disks):
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
                                   errors.ECODE_INVAL)
      for lv_name in all_lvs:
        try:
          # FIXME: lv_name here is "vg/lv"; need to ensure that other calls
          # to ReserveLV use the same syntax
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("LV named %s used by another instance" %
                                     lv_name, errors.ECODE_NOTUNIQUE)

      vg_names = self.rpc.call_vg_list([pnode.uuid])[pnode.uuid]
      vg_names.Raise("Cannot get VG information from node %s" % pnode.name)

      node_lvs = self.rpc.call_lv_list([pnode.uuid],
                                       vg_names.payload.keys())[pnode.uuid]
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
      node_lvs = node_lvs.payload

      delta = all_lvs.difference(node_lvs.keys())
      if delta:
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
      if online_lvs:
        raise errors.OpPrereqError("Online logical volumes found, cannot"
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
                                   errors.ECODE_STATE)
      # update the size of disk based on what is found
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
                                        dsk[constants.IDISK_ADOPT])][0]))

    elif self.op.disk_template == constants.DT_BLOCK:
      # Normalize and de-duplicate device paths
      all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
                       for disk in self.disks])
      if len(all_disks) != len(self.disks):
        raise errors.OpPrereqError("Duplicate disk names given for adoption",
                                   errors.ECODE_INVAL)
      baddisks = [d for d in all_disks
                  if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
      if baddisks:
        raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
                                   " cannot be adopted" %
                                   (utils.CommaJoin(baddisks),
                                    constants.ADOPTABLE_BLOCKDEV_ROOT),
                                   errors.ECODE_INVAL)

      node_disks = self.rpc.call_bdev_sizes([pnode.uuid],
                                            list(all_disks))[pnode.uuid]
      node_disks.Raise("Cannot get block device information from node %s" %
                       pnode.name)
      node_disks = node_disks.payload
      delta = all_disks.difference(node_disks.keys())
      if delta:
        raise errors.OpPrereqError("Missing block device(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_disks[dsk[constants.IDISK_ADOPT]]))

    # Check disk access param to be compatible with specified hypervisor
    node_info = self.cfg.GetNodeInfo(self.op.pnode_uuid)
    node_group = self.cfg.GetNodeGroup(node_info.group)
    disk_params = self.cfg.GetGroupDiskParams(node_group)
    access_type = disk_params[self.op.disk_template].get(
      constants.RBD_ACCESS, constants.DISK_KERNELSPACE
    )

    if not IsValidDiskAccessModeCombination(self.op.hypervisor,
                                            self.op.disk_template,
                                            access_type):
      raise errors.OpPrereqError("Selected hypervisor (%s) cannot be"
                                 " used with %s disk access param" %
                                 (self.op.hypervisor, access_type),
                                 errors.ECODE_STATE)
1287

    
1288
    # Verify instance specs
1289
    spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
1290
    ispec = {
1291
      constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
1292
      constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
1293
      constants.ISPEC_DISK_COUNT: len(self.disks),
1294
      constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE]
1295
                                  for disk in self.disks],
1296
      constants.ISPEC_NIC_COUNT: len(self.nics),
1297
      constants.ISPEC_SPINDLE_USE: spindle_use,
1298
      }
1299

    
1300
    group_info = self.cfg.GetNodeGroup(pnode.group)
1301
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
1302
    res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
1303
                                               self.op.disk_template)
1304
    if not self.op.ignore_ipolicy and res:
1305
      msg = ("Instance allocation to group %s (%s) violates policy: %s" %
1306
             (pnode.group, group_info.name, utils.CommaJoin(res)))
1307
      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
1308

    
1309
    CheckHVParams(self, node_uuids, self.op.hypervisor, self.op.hvparams)
1310

    
1311
    CheckNodeHasOS(self, pnode.uuid, self.op.os_type, self.op.force_variant)
1312
    # check OS parameters (remotely)
1313
    CheckOSParams(self, True, node_uuids, self.op.os_type, self.os_full)
1314

    
1315
    CheckNicsBridgesExist(self, self.nics, self.pnode.uuid)
1316

    
1317
    #TODO: _CheckExtParams (remotely)
1318
    # Check parameters for extstorage
1319

    
1320
    # memory check on primary node
1321
    #TODO(dynmem): use MINMEM for checking
1322
    if self.op.start:
1323
      hvfull = objects.FillDict(cluster.hvparams.get(self.op.hypervisor, {}),
1324
                                self.op.hvparams)
1325
      CheckNodeFreeMemory(self, self.pnode.uuid,
1326
                          "creating instance %s" % self.op.instance_name,
1327
                          self.be_full[constants.BE_MAXMEM],
1328
                          self.op.hypervisor, hvfull)
1329

    
1330
    self.dry_run_result = list(node_uuids)
1331

    
1332
  def Exec(self, feedback_fn):
1333
    """Create and add the instance to the cluster.
1334

1335
    """
1336
    assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
1337
                self.owned_locks(locking.LEVEL_NODE)), \
1338
      "Node locks differ from node resource locks"
1339
    assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)
1340

    
1341
    ht_kind = self.op.hypervisor
1342
    if ht_kind in constants.HTS_REQ_PORT:
1343
      network_port = self.cfg.AllocatePort()
1344
    else:
1345
      network_port = None
1346

    
1347
    instance_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
1348

    
1349
    # This is ugly but we got a chicken-egg problem here
1350
    # We can only take the group disk parameters, as the instance
1351
    # has no disks yet (we are generating them right here).
1352
    nodegroup = self.cfg.GetNodeGroup(self.pnode.group)
1353
    disks = GenerateDiskTemplate(self,
1354
                                 self.op.disk_template,
1355
                                 instance_uuid, self.pnode.uuid,
1356
                                 self.secondaries,
1357
                                 self.disks,
1358
                                 self.instance_file_storage_dir,
1359
                                 self.op.file_driver,
1360
                                 0,
1361
                                 feedback_fn,
1362
                                 self.cfg.GetGroupDiskParams(nodegroup))
1363

    
1364
    iobj = objects.Instance(name=self.op.instance_name,
1365
                            uuid=instance_uuid,
1366
                            os=self.op.os_type,
1367
                            primary_node=self.pnode.uuid,
1368
                            nics=self.nics, disks=disks,
1369
                            disk_template=self.op.disk_template,
1370
                            disks_active=False,
1371
                            admin_state=constants.ADMINST_DOWN,
1372
                            network_port=network_port,
1373
                            beparams=self.op.beparams,
1374
                            hvparams=self.op.hvparams,
1375
                            hypervisor=self.op.hypervisor,
1376
                            osparams=self.op.osparams,
1377
                            osparams_private=self.op.osparams_private,
1378
                            )
1379

    
1380
    if self.op.tags:
1381
      for tag in self.op.tags:
1382
        iobj.AddTag(tag)
1383

    
1384
    if self.adopt_disks:
1385
      if self.op.disk_template == constants.DT_PLAIN:
1386
        # rename LVs to the newly-generated names; we need to construct
1387
        # 'fake' LV disks with the old data, plus the new unique_id
1388
        tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
1389
        rename_to = []
1390
        for t_dsk, a_dsk in zip(tmp_disks, self.disks):
1391
          rename_to.append(t_dsk.logical_id)
1392
          t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
1393
        result = self.rpc.call_blockdev_rename(self.pnode.uuid,
1394
                                               zip(tmp_disks, rename_to))
1395
        result.Raise("Failed to rename adoped LVs")
1396
    else:
1397
      feedback_fn("* creating instance disks...")
1398
      try:
1399
        CreateDisks(self, iobj)
1400
      except errors.OpExecError:
1401
        self.LogWarning("Device creation failed")
1402
        self.cfg.ReleaseDRBDMinors(self.op.instance_name)
1403
        raise
1404

    
1405
    feedback_fn("adding instance %s to cluster config" % self.op.instance_name)
1406

    
1407
    self.cfg.AddInstance(iobj, self.proc.GetECId())
1408

    
1409
    # Declare that we don't want to remove the instance lock anymore, as we've
1410
    # added the instance to the config
1411
    del self.remove_locks[locking.LEVEL_INSTANCE]
1412

    
1413
    if self.op.mode == constants.INSTANCE_IMPORT:
1414
      # Release unused nodes
1415
      ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node_uuid])
1416
    else:
1417
      # Release all nodes
1418
      ReleaseLocks(self, locking.LEVEL_NODE)
1419

    
1420
    disk_abort = False
1421
    if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
1422
      feedback_fn("* wiping instance disks...")
1423
      try:
1424
        WipeDisks(self, iobj)
1425
      except errors.OpExecError, err:
1426
        logging.exception("Wiping disks failed")
1427
        self.LogWarning("Wiping instance disks failed (%s)", err)
1428
        disk_abort = True
1429

    
1430
    if disk_abort:
1431
      # Something is already wrong with the disks, don't do anything else
1432
      pass
1433
    elif self.op.wait_for_sync:
1434
      disk_abort = not WaitForSync(self, iobj)
1435
    elif iobj.disk_template in constants.DTS_INT_MIRROR:
1436
      # make sure the disks are not degraded (still sync-ing is ok)
1437
      feedback_fn("* checking mirrors status")
1438
      disk_abort = not WaitForSync(self, iobj, oneshot=True)
1439
    else:
1440
      disk_abort = False
1441

    
1442
    if disk_abort:
1443
      RemoveDisks(self, iobj)
1444
      self.cfg.RemoveInstance(iobj.uuid)
1445
      # Make sure the instance lock gets removed
1446
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
1447
      raise errors.OpExecError("There are some degraded disks for"
1448
                               " this instance")
1449

    
1450
    # instance disks are now active
1451
    iobj.disks_active = True
1452

    
1453
    # Release all node resource locks
1454
    ReleaseLocks(self, locking.LEVEL_NODE_RES)
1455

    
1456
    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
1457
      if self.op.mode == constants.INSTANCE_CREATE:
1458
        if not self.op.no_install:
1459
          pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
1460
                        not self.op.wait_for_sync)
1461
          if pause_sync:
1462
            feedback_fn("* pausing disk sync to install instance OS")
1463
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
1464
                                                              (iobj.disks,
1465
                                                               iobj), True)
1466
            for idx, success in enumerate(result.payload):
1467
              if not success:
1468
                logging.warn("pause-sync of instance %s for disk %d failed",
1469
                             self.op.instance_name, idx)
1470

    
1471
          feedback_fn("* running the instance OS create scripts...")
1472
          # FIXME: pass debug option from opcode to backend
1473
          os_add_result = \
1474
            self.rpc.call_instance_os_add(self.pnode.uuid,
1475
                                          (iobj, self.op.osparams_secret),
1476
                                          False,
1477
                                          self.op.debug_level)
1478
          if pause_sync:
1479
            feedback_fn("* resuming disk sync")
1480
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
1481
                                                              (iobj.disks,
1482
                                                               iobj), False)
1483
            for idx, success in enumerate(result.payload):
1484
              if not success:
1485
                logging.warn("resume-sync of instance %s for disk %d failed",
1486
                             self.op.instance_name, idx)
1487

    
1488
          os_add_result.Raise("Could not add os for instance %s"
1489
                              " on node %s" % (self.op.instance_name,
1490
                                               self.pnode.name))
1491

    
1492
      else:
1493
        if self.op.mode == constants.INSTANCE_IMPORT:
1494
          feedback_fn("* running the instance OS import scripts...")
1495

    
1496
          transfers = []
1497

    
1498
          for idx, image in enumerate(self.src_images):
1499
            if not image:
1500
              continue
1501

    
1502
            # FIXME: pass debug option from opcode to backend
1503
            dt = masterd.instance.DiskTransfer("disk/%s" % idx,
1504
                                               constants.IEIO_FILE, (image, ),
1505
                                               constants.IEIO_SCRIPT,
1506
                                               ((iobj.disks[idx], iobj), idx),
1507
                                               None)
1508
            transfers.append(dt)
1509

    
1510
          import_result = \
1511
            masterd.instance.TransferInstanceData(self, feedback_fn,
1512
                                                  self.op.src_node_uuid,
1513
                                                  self.pnode.uuid,
1514
                                                  self.pnode.secondary_ip,
1515
                                                  self.op.compress,
1516
                                                  iobj, transfers)
1517
          if not compat.all(import_result):
1518
            self.LogWarning("Some disks for instance %s on node %s were not"
1519
                            " imported successfully" % (self.op.instance_name,
1520
                                                        self.pnode.name))
1521

    
1522
          rename_from = self._old_instance_name
1523

    
1524
        elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
1525
          feedback_fn("* preparing remote import...")
1526
          # The source cluster will stop the instance before attempting to make
1527
          # a connection. In some cases stopping an instance can take a long
1528
          # time, hence the shutdown timeout is added to the connection
1529
          # timeout.
1530
          connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
1531
                             self.op.source_shutdown_timeout)
1532
          timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
1533

    
1534
          assert iobj.primary_node == self.pnode.uuid
1535
          disk_results = \
1536
            masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
1537
                                          self.source_x509_ca,
1538
                                          self._cds, self.op.compress, timeouts)
1539
          if not compat.all(disk_results):
1540
            # TODO: Should the instance still be started, even if some disks
1541
            # failed to import (valid for local imports, too)?
1542
            self.LogWarning("Some disks for instance %s on node %s were not"
1543
                            " imported successfully" % (self.op.instance_name,
1544
                                                        self.pnode.name))
1545

    
1546
          rename_from = self.source_instance_name
1547

    
1548
        else:
1549
          # also checked in the prereq part
1550
          raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
1551
                                       % self.op.mode)
1552

    
1553
        # Run rename script on newly imported instance
1554
        assert iobj.name == self.op.instance_name
1555
        feedback_fn("Running rename script for %s" % self.op.instance_name)
1556
        result = self.rpc.call_instance_run_rename(self.pnode.uuid, iobj,
1557
                                                   rename_from,
1558
                                                   self.op.debug_level)
1559
        result.Warn("Failed to run rename script for %s on node %s" %
1560
                    (self.op.instance_name, self.pnode.name), self.LogWarning)
1561

    
1562
    assert not self.owned_locks(locking.LEVEL_NODE_RES)
1563

    
1564
    if self.op.start:
1565
      iobj.admin_state = constants.ADMINST_UP
1566
      self.cfg.Update(iobj, feedback_fn)
1567
      logging.info("Starting instance %s on node %s", self.op.instance_name,
1568
                   self.pnode.name)
1569
      feedback_fn("* starting instance...")
1570
      result = self.rpc.call_instance_start(self.pnode.uuid, (iobj, None, None),
1571
                                            False, self.op.reason)
1572
      result.Raise("Could not start instance")
1573

    
1574
    return self.cfg.GetNodeNames(list(iobj.all_nodes))
1575

    
1576

    
1577
class LUInstanceRename(LogicalUnit):
1578
  """Rename an instance.
1579

1580
  """
1581
  HPATH = "instance-rename"
1582
  HTYPE = constants.HTYPE_INSTANCE
1583

    
1584
  def CheckArguments(self):
1585
    """Check arguments.
1586

1587
    """
1588
    if self.op.ip_check and not self.op.name_check:
1589
      # TODO: make the ip check more flexible and not depend on the name check
1590
      raise errors.OpPrereqError("IP address check requires a name check",
1591
                                 errors.ECODE_INVAL)
1592

    
1593
  def BuildHooksEnv(self):
1594
    """Build hooks env.
1595

1596
    This runs on master, primary and secondary nodes of the instance.
1597

1598
    """
1599
    env = BuildInstanceHookEnvByObject(self, self.instance)
1600
    env["INSTANCE_NEW_NAME"] = self.op.new_name
1601
    return env
1602

    
1603
  def BuildHooksNodes(self):
1604
    """Build hooks nodes.
1605

1606
    """
1607
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
1608
    return (nl, nl)
1609

    
1610
  def CheckPrereq(self):
1611
    """Check prerequisites.
1612

1613
    This checks that the instance is in the cluster and is not running.
1614

1615
    """
1616
    (self.op.instance_uuid, self.op.instance_name) = \
1617
      ExpandInstanceUuidAndName(self.cfg, self.op.instance_uuid,
1618
                                self.op.instance_name)
1619
    instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1620
    assert instance is not None
1621

    
1622
    # It should actually not happen that an instance is running with a disabled
1623
    # disk template, but in case it does, the renaming of file-based instances
1624
    # will fail horribly. Thus, we test it before.
1625
    if (instance.disk_template in constants.DTS_FILEBASED and
1626
        self.op.new_name != instance.name):
1627
      CheckDiskTemplateEnabled(self.cfg.GetClusterInfo(),
1628
                               instance.disk_template)
1629

    
1630
    CheckNodeOnline(self, instance.primary_node)
1631
    CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
1632
                       msg="cannot rename")
1633
    self.instance = instance
1634

    
1635
    new_name = self.op.new_name
1636
    if self.op.name_check:
1637
      hostname = _CheckHostnameSane(self, new_name)
1638
      new_name = self.op.new_name = hostname.name
1639
      if (self.op.ip_check and
1640
          netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
1641
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
1642
                                   (hostname.ip, new_name),
1643
                                   errors.ECODE_NOTUNIQUE)
1644

    
1645
    instance_names = [inst.name for
1646
                      inst in self.cfg.GetAllInstancesInfo().values()]
1647
    if new_name in instance_names and new_name != instance.name:
1648
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
1649
                                 new_name, errors.ECODE_EXISTS)
1650

    
1651
  def Exec(self, feedback_fn):
1652
    """Rename the instance.
1653

1654
    """
1655
    old_name = self.instance.name
1656

    
1657
    rename_file_storage = False
1658
    if (self.instance.disk_template in (constants.DT_FILE,
1659
                                        constants.DT_SHARED_FILE) and
1660
        self.op.new_name != self.instance.name):
1661
      old_file_storage_dir = os.path.dirname(
1662
                               self.instance.disks[0].logical_id[1])
1663
      rename_file_storage = True
1664

    
1665
    self.cfg.RenameInstance(self.instance.uuid, self.op.new_name)
1666
    # Change the instance lock. This is definitely safe while we hold the BGL.
1667
    # Otherwise the new lock would have to be added in acquired mode.
1668
    assert self.REQ_BGL
1669
    assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER)
1670
    self.glm.remove(locking.LEVEL_INSTANCE, old_name)
1671
    self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
1672

    
1673
    # re-read the instance from the configuration after rename
1674
    renamed_inst = self.cfg.GetInstanceInfo(self.instance.uuid)
1675

    
1676
    if rename_file_storage:
1677
      new_file_storage_dir = os.path.dirname(
1678
                               renamed_inst.disks[0].logical_id[1])
1679
      result = self.rpc.call_file_storage_dir_rename(renamed_inst.primary_node,
1680
                                                     old_file_storage_dir,
1681
                                                     new_file_storage_dir)
1682
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
1683
                   " (but the instance has been renamed in Ganeti)" %
1684
                   (self.cfg.GetNodeName(renamed_inst.primary_node),
1685
                    old_file_storage_dir, new_file_storage_dir))
1686

    
1687
    StartInstanceDisks(self, renamed_inst, None)
1688
    # update info on disks
1689
    info = GetInstanceInfoText(renamed_inst)
1690
    for (idx, disk) in enumerate(renamed_inst.disks):
1691
      for node_uuid in renamed_inst.all_nodes:
1692
        result = self.rpc.call_blockdev_setinfo(node_uuid,
1693
                                                (disk, renamed_inst), info)
1694
        result.Warn("Error setting info on node %s for disk %s" %
1695
                    (self.cfg.GetNodeName(node_uuid), idx), self.LogWarning)
1696
    try:
1697
      result = self.rpc.call_instance_run_rename(renamed_inst.primary_node,
1698
                                                 renamed_inst, old_name,
1699
                                                 self.op.debug_level)
1700
      result.Warn("Could not run OS rename script for instance %s on node %s"
1701
                  " (but the instance has been renamed in Ganeti)" %
1702
                  (renamed_inst.name,
1703
                   self.cfg.GetNodeName(renamed_inst.primary_node)),
1704
                  self.LogWarning)
1705
    finally:
1706
      ShutdownInstanceDisks(self, renamed_inst)
1707

    
1708
    return renamed_inst.name
1709

    
1710

    
1711
class LUInstanceRemove(LogicalUnit):
1712
  """Remove an instance.
1713

1714
  """
1715
  HPATH = "instance-remove"
1716
  HTYPE = constants.HTYPE_INSTANCE
1717
  REQ_BGL = False
1718

    
1719
  def ExpandNames(self):
1720
    self._ExpandAndLockInstance()
1721
    self.needed_locks[locking.LEVEL_NODE] = []
1722
    self.needed_locks[locking.LEVEL_NODE_RES] = []
1723
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
1724

    
1725
  def DeclareLocks(self, level):
1726
    if level == locking.LEVEL_NODE:
1727
      self._LockInstancesNodes()
1728
    elif level == locking.LEVEL_NODE_RES:
1729
      # Copy node locks
1730
      self.needed_locks[locking.LEVEL_NODE_RES] = \
1731
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1732

    
1733
  def BuildHooksEnv(self):
1734
    """Build hooks env.
1735

1736
    This runs on master, primary and secondary nodes of the instance.
1737

1738
    """
1739
    env = BuildInstanceHookEnvByObject(self, self.instance)
1740
    env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
1741
    return env
1742

    
1743
  def BuildHooksNodes(self):
1744
    """Build hooks nodes.
1745

1746
    """
1747
    nl = [self.cfg.GetMasterNode()]
1748
    nl_post = list(self.instance.all_nodes) + nl
1749
    return (nl, nl_post)
1750

    
1751
  def CheckPrereq(self):
1752
    """Check prerequisites.
1753

1754
    This checks that the instance is in the cluster.
1755

1756
    """
1757
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1758
    assert self.instance is not None, \
1759
      "Cannot retrieve locked instance %s" % self.op.instance_name
1760

    
1761
  def Exec(self, feedback_fn):
1762
    """Remove the instance.
1763

1764
    """
1765
    logging.info("Shutting down instance %s on node %s", self.instance.name,
1766
                 self.cfg.GetNodeName(self.instance.primary_node))
1767

    
1768
    result = self.rpc.call_instance_shutdown(self.instance.primary_node,
1769
                                             self.instance,
1770
                                             self.op.shutdown_timeout,
1771
                                             self.op.reason)
1772
    if self.op.ignore_failures:
1773
      result.Warn("Warning: can't shutdown instance", feedback_fn)
1774
    else:
1775
      result.Raise("Could not shutdown instance %s on node %s" %
1776
                   (self.instance.name,
1777
                    self.cfg.GetNodeName(self.instance.primary_node)))
1778

    
1779
    assert (self.owned_locks(locking.LEVEL_NODE) ==
1780
            self.owned_locks(locking.LEVEL_NODE_RES))
1781
    assert not (set(self.instance.all_nodes) -
1782
                self.owned_locks(locking.LEVEL_NODE)), \
1783
      "Not owning correct locks"
1784

    
1785
    RemoveInstance(self, feedback_fn, self.instance, self.op.ignore_failures)
1786

    
1787

    
1788
class LUInstanceMove(LogicalUnit):
1789
  """Move an instance by data-copying.
1790

1791
  """
1792
  HPATH = "instance-move"
1793
  HTYPE = constants.HTYPE_INSTANCE
1794
  REQ_BGL = False
1795

    
1796
  def ExpandNames(self):
1797
    self._ExpandAndLockInstance()
1798
    (self.op.target_node_uuid, self.op.target_node) = \
1799
      ExpandNodeUuidAndName(self.cfg, self.op.target_node_uuid,
1800
                            self.op.target_node)
1801
    self.needed_locks[locking.LEVEL_NODE] = [self.op.target_node_uuid]
1802
    self.needed_locks[locking.LEVEL_NODE_RES] = []
1803
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
1804

    
1805
  def DeclareLocks(self, level):
1806
    if level == locking.LEVEL_NODE:
1807
      self._LockInstancesNodes(primary_only=True)
1808
    elif level == locking.LEVEL_NODE_RES:
1809
      # Copy node locks
1810
      self.needed_locks[locking.LEVEL_NODE_RES] = \
1811
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1812

    
1813
  def BuildHooksEnv(self):
1814
    """Build hooks env.
1815

1816
    This runs on master, primary and target nodes of the instance.
1817

1818
    """
1819
    env = {
1820
      "TARGET_NODE": self.op.target_node,
1821
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
1822
      }
1823
    env.update(BuildInstanceHookEnvByObject(self, self.instance))
1824
    return env
1825

    
1826
  def BuildHooksNodes(self):
1827
    """Build hooks nodes.
1828

1829
    """
1830
    nl = [
1831
      self.cfg.GetMasterNode(),
1832
      self.instance.primary_node,
1833
      self.op.target_node_uuid,
1834
      ]
1835
    return (nl, nl)
1836

    
1837
  def CheckPrereq(self):
1838
    """Check prerequisites.
1839

1840
    This checks that the instance is in the cluster.
1841

1842
    """
1843
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1844
    assert self.instance is not None, \
1845
      "Cannot retrieve locked instance %s" % self.op.instance_name
1846

    
1847
    if self.instance.disk_template not in constants.DTS_COPYABLE:
1848
      raise errors.OpPrereqError("Disk template %s not suitable for copying" %
1849
                                 self.instance.disk_template,
1850
                                 errors.ECODE_STATE)
1851

    
1852
    target_node = self.cfg.GetNodeInfo(self.op.target_node_uuid)
1853
    assert target_node is not None, \
1854
      "Cannot retrieve locked node %s" % self.op.target_node
1855

    
1856
    self.target_node_uuid = target_node.uuid
1857
    if target_node.uuid == self.instance.primary_node:
1858
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
1859
                                 (self.instance.name, target_node.name),
1860
                                 errors.ECODE_STATE)
1861

    
1862
    cluster = self.cfg.GetClusterInfo()
1863
    bep = cluster.FillBE(self.instance)
1864

    
1865
    for idx, dsk in enumerate(self.instance.disks):
1866
      if dsk.dev_type not in (constants.DT_PLAIN, constants.DT_FILE,
1867
                              constants.DT_SHARED_FILE, constants.DT_GLUSTER):
1868
        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
1869
                                   " cannot copy" % idx, errors.ECODE_STATE)
1870

    
1871
    CheckNodeOnline(self, target_node.uuid)
1872
    CheckNodeNotDrained(self, target_node.uuid)
1873
    CheckNodeVmCapable(self, target_node.uuid)
1874
    group_info = self.cfg.GetNodeGroup(target_node.group)
1875
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
1876
    CheckTargetNodeIPolicy(self, ipolicy, self.instance, target_node, self.cfg,
1877
                           ignore=self.op.ignore_ipolicy)
1878

    
1879
    if self.instance.admin_state == constants.ADMINST_UP:
1880
      # check memory requirements on the target node
1881
      CheckNodeFreeMemory(
1882
          self, target_node.uuid, "failing over instance %s" %
1883
          self.instance.name, bep[constants.BE_MAXMEM],
1884
          self.instance.hypervisor,
1885
          cluster.hvparams[self.instance.hypervisor])
1886
    else:
1887
      self.LogInfo("Not checking memory on the secondary node as"
1888
                   " instance will not be started")
1889

    
1890
    # check bridge existance
1891
    CheckInstanceBridgesExist(self, self.instance, node_uuid=target_node.uuid)
1892

    
1893
  def Exec(self, feedback_fn):
1894
    """Move an instance.
1895

1896
    The move is done by shutting it down on its present node, copying
1897
    the data over (slow) and starting it on the new node.
1898

1899
    """
1900
    source_node = self.cfg.GetNodeInfo(self.instance.primary_node)
1901
    target_node = self.cfg.GetNodeInfo(self.target_node_uuid)
1902

    
1903
    self.LogInfo("Shutting down instance %s on source node %s",
1904
                 self.instance.name, source_node.name)
1905

    
1906
    assert (self.owned_locks(locking.LEVEL_NODE) ==
1907
            self.owned_locks(locking.LEVEL_NODE_RES))
1908

    
1909
    result = self.rpc.call_instance_shutdown(source_node.uuid, self.instance,
1910
                                             self.op.shutdown_timeout,
1911
                                             self.op.reason)
1912
    if self.op.ignore_consistency:
1913
      result.Warn("Could not shutdown instance %s on node %s. Proceeding"
1914
                  " anyway. Please make sure node %s is down. Error details" %
1915
                  (self.instance.name, source_node.name, source_node.name),
1916
                  self.LogWarning)
1917
    else:
1918
      result.Raise("Could not shutdown instance %s on node %s" %
1919
                   (self.instance.name, source_node.name))
1920

    
1921
    # create the target disks
1922
    try:
1923
      CreateDisks(self, self.instance, target_node_uuid=target_node.uuid)
1924
    except errors.OpExecError:
1925
      self.LogWarning("Device creation failed")
1926
      self.cfg.ReleaseDRBDMinors(self.instance.uuid)
1927
      raise
1928

    
1929
    errs = []
1930
    transfers = []
1931
    # activate, get path, create transfer jobs
1932
    for idx, disk in enumerate(self.instance.disks):
1933
      # FIXME: pass debug option from opcode to backend
1934
      dt = masterd.instance.DiskTransfer("disk/%s" % idx,
1935
                                         constants.IEIO_RAW_DISK,
1936
                                         (disk, self.instance),
1937
                                         constants.IEIO_RAW_DISK,
1938
                                         (disk, self.instance),
1939
                                         None)
1940
      transfers.append(dt)
1941

    
1942
    import_result = \
1943
      masterd.instance.TransferInstanceData(self, feedback_fn,
1944
                                            source_node.uuid,
1945
                                            target_node.uuid,
1946
                                            target_node.secondary_ip,
1947
                                            self.op.compress,
1948
                                            self.instance, transfers)
1949
    if not compat.all(import_result):
1950
      errs.append("Failed to transfer instance data")
1951

    
1952
    if errs:
1953
      self.LogWarning("Some disks failed to copy, aborting")
1954
      try:
1955
        RemoveDisks(self, self.instance, target_node_uuid=target_node.uuid)
1956
      finally:
1957
        self.cfg.ReleaseDRBDMinors(self.instance.uuid)
1958
        raise errors.OpExecError("Errors during disk copy: %s" %
1959
                                 (",".join(errs),))
1960

    
1961
    self.instance.primary_node = target_node.uuid
1962
    self.cfg.Update(self.instance, feedback_fn)
1963

    
1964
    self.LogInfo("Removing the disks on the original node")
1965
    RemoveDisks(self, self.instance, target_node_uuid=source_node.uuid)
1966

    
1967
    # Only start the instance if it's marked as up
1968
    if self.instance.admin_state == constants.ADMINST_UP:
1969
      self.LogInfo("Starting instance %s on node %s",
1970
                   self.instance.name, target_node.name)
1971

    
1972
      disks_ok, _ = AssembleInstanceDisks(self, self.instance,
1973
                                          ignore_secondaries=True)
1974
      if not disks_ok:
1975
        ShutdownInstanceDisks(self, self.instance)
1976
        raise errors.OpExecError("Can't activate the instance's disks")
1977

    
1978
      result = self.rpc.call_instance_start(target_node.uuid,
1979
                                            (self.instance, None, None), False,
1980
                                            self.op.reason)
1981
      msg = result.fail_msg
1982
      if msg:
1983
        ShutdownInstanceDisks(self, self.instance)
1984
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
1985
                                 (self.instance.name, target_node.name, msg))
1986

    
1987

    
1988
class LUInstanceMultiAlloc(NoHooksLU):
1989
  """Allocates multiple instances at the same time.
1990

1991
  """
1992
  REQ_BGL = False
1993

    
1994
  def CheckArguments(self):
1995
    """Check arguments.
1996

1997
    """
1998
    nodes = []
1999
    for inst in self.op.instances:
2000
      if inst.iallocator is not None:
2001
        raise errors.OpPrereqError("iallocator are not allowed to be set on"
2002
                                   " instance objects", errors.ECODE_INVAL)
2003
      nodes.append(bool(inst.pnode))
2004
      if inst.disk_template in constants.DTS_INT_MIRROR:
2005
        nodes.append(bool(inst.snode))
2006

    
2007
    has_nodes = compat.any(nodes)
2008
    if compat.all(nodes) ^ has_nodes:
2009
      raise errors.OpPrereqError("There are instance objects providing"
2010
                                 " pnode/snode while others do not",
2011
                                 errors.ECODE_INVAL)
2012

    
2013
    if not has_nodes and self.op.iallocator is None:
2014
      default_iallocator = self.cfg.GetDefaultIAllocator()
2015
      if default_iallocator:
2016
        self.op.iallocator = default_iallocator
2017
      else:
2018
        raise errors.OpPrereqError("No iallocator or nodes on the instances"
2019
                                   " given and no cluster-wide default"
2020
                                   " iallocator found; please specify either"
2021
                                   " an iallocator or nodes on the instances"
2022
                                   " or set a cluster-wide default iallocator",
2023
                                   errors.ECODE_INVAL)
2024

    
2025
    _CheckOpportunisticLocking(self.op)
2026

    
2027
    dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
2028
    if dups:
2029
      raise errors.OpPrereqError("There are duplicate instance names: %s" %
2030
                                 utils.CommaJoin(dups), errors.ECODE_INVAL)
2031

    
2032
  def ExpandNames(self):
2033
    """Calculate the locks.
2034

2035
    """
2036
    self.share_locks = ShareAll()
2037
    self.needed_locks = {
2038
      # iallocator will select nodes and even if no iallocator is used,
2039
      # collisions with LUInstanceCreate should be avoided
2040
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
2041
      }
2042

    
2043
    if self.op.iallocator:
2044
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
2045
      self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET
2046

    
2047
      if self.op.opportunistic_locking:
2048
        self.opportunistic_locks[locking.LEVEL_NODE] = True
2049
    else:
2050
      nodeslist = []
2051
      for inst in self.op.instances:
2052
        (inst.pnode_uuid, inst.pnode) = \
2053
          ExpandNodeUuidAndName(self.cfg, inst.pnode_uuid, inst.pnode)
2054
        nodeslist.append(inst.pnode_uuid)
2055
        if inst.snode is not None:
2056
          (inst.snode_uuid, inst.snode) = \
2057
            ExpandNodeUuidAndName(self.cfg, inst.snode_uuid, inst.snode)
2058
          nodeslist.append(inst.snode_uuid)
2059

    
2060
      self.needed_locks[locking.LEVEL_NODE] = nodeslist
2061
      # Lock resources of instance's primary and secondary nodes (copy to
2062
      # prevent accidential modification)
2063
      self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)
2064

    
2065
  def DeclareLocks(self, level):
2066
    if level == locking.LEVEL_NODE_RES and \
2067
      self.opportunistic_locks[locking.LEVEL_NODE]:
2068
      # Even when using opportunistic locking, we require the same set of
2069
      # NODE_RES locks as we got NODE locks
2070
      self.needed_locks[locking.LEVEL_NODE_RES] = \
2071
        self.owned_locks(locking.LEVEL_NODE)
2072

    
2073
  def CheckPrereq(self):
2074
    """Check prerequisite.
2075

2076
    """
2077
    if self.op.iallocator:
2078
      cluster = self.cfg.GetClusterInfo()
2079
      default_vg = self.cfg.GetVGName()
2080
      ec_id = self.proc.GetECId()
2081

    
2082
      if self.op.opportunistic_locking:
2083
        # Only consider nodes for which a lock is held
2084
        node_whitelist = self.cfg.GetNodeNames(
2085
                           list(self.owned_locks(locking.LEVEL_NODE)))
2086
      else:
2087
        node_whitelist = None
2088

    
2089
      insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
2090
                                           _ComputeNics(op, cluster, None,
2091
                                                        self.cfg, ec_id),
2092
                                           _ComputeFullBeParams(op, cluster),
2093
                                           node_whitelist)
2094
               for op in self.op.instances]
2095

    
2096
      req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
2097
      ial = iallocator.IAllocator(self.cfg, self.rpc, req)
2098

    
2099
      ial.Run(self.op.iallocator)
2100

    
2101
      if not ial.success:
2102
        raise errors.OpPrereqError("Can't compute nodes using"
2103
                                   " iallocator '%s': %s" %
2104
                                   (self.op.iallocator, ial.info),
2105
                                   errors.ECODE_NORES)
2106

    
2107
      self.ia_result = ial.result
2108

    
2109
    if self.op.dry_run:
2110
      self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
2111
        constants.JOB_IDS_KEY: [],
2112
        })
2113

    
2114
  def _ConstructPartialResult(self):
2115
    """Contructs the partial result.
2116

2117
    """
2118
    if self.op.iallocator:
2119
      (allocatable, failed_insts) = self.ia_result
2120
      allocatable_insts = map(compat.fst, allocatable)
2121
    else:
2122
      allocatable_insts = [op.instance_name for op in self.op.instances]
2123
      failed_insts = []
2124

    
2125
    return {
2126
      constants.ALLOCATABLE_KEY: allocatable_insts,
2127
      constants.FAILED_KEY: failed_insts,
2128
      }
2129

    
2130
  def Exec(self, feedback_fn):
2131
    """Executes the opcode.
2132

2133
    """
2134
    jobs = []
2135
    if self.op.iallocator:
2136
      op2inst = dict((op.instance_name, op) for op in self.op.instances)
2137
      (allocatable, failed) = self.ia_result
2138

    
2139
      for (name, node_names) in allocatable:
2140
        op = op2inst.pop(name)
2141

    
2142
        (op.pnode_uuid, op.pnode) = \
2143
          ExpandNodeUuidAndName(self.cfg, None, node_names[0])
2144
        if len(node_names) > 1:
2145
          (op.snode_uuid, op.snode) = \
2146
            ExpandNodeUuidAndName(self.cfg, None, node_names[1])
2147

    
2148
          jobs.append([op])
2149

    
2150
        missing = set(op2inst.keys()) - set(failed)
2151
        assert not missing, \
2152
          "Iallocator did return incomplete result: %s" % \
2153
          utils.CommaJoin(missing)
2154
    else:
2155
      jobs.extend([op] for op in self.op.instances)
2156

    
2157
    return ResultWithJobs(jobs, **self._ConstructPartialResult())
2158

    
2159

    
2160
class _InstNicModPrivate:
2161
  """Data structure for network interface modifications.
2162

2163
  Used by L{LUInstanceSetParams}.
2164

2165
  """
2166
  def __init__(self):
2167
    self.params = None
2168
    self.filled = None
2169

    
2170

    
2171
def _PrepareContainerMods(mods, private_fn):
2172
  """Prepares a list of container modifications by adding a private data field.
2173

2174
  @type mods: list of tuples; (operation, index, parameters)
2175
  @param mods: List of modifications
2176
  @type private_fn: callable or None
2177
  @param private_fn: Callable for constructing a private data field for a
2178
    modification
2179
  @rtype: list
2180

2181
  """
2182
  if private_fn is None:
2183
    fn = lambda: None
2184
  else:
2185
    fn = private_fn
2186

    
2187
  return [(op, idx, params, fn()) for (op, idx, params) in mods]
2188

    
2189

    
2190
def _CheckNodesPhysicalCPUs(lu, node_uuids, requested, hypervisor_specs):
2191
  """Checks if nodes have enough physical CPUs
2192

2193
  This function checks if all given nodes have the needed number of
2194
  physical CPUs. In case any node has less CPUs or we cannot get the
2195
  information from the node, this function raises an OpPrereqError
2196
  exception.
2197

2198
  @type lu: C{LogicalUnit}
2199
  @param lu: a logical unit from which we get configuration data
2200
  @type node_uuids: C{list}
2201
  @param node_uuids: the list of node UUIDs to check
2202
  @type requested: C{int}
2203
  @param requested: the minimum acceptable number of physical CPUs
2204
  @type hypervisor_specs: list of pairs (string, dict of strings)
2205
  @param hypervisor_specs: list of hypervisor specifications in
2206
      pairs (hypervisor_name, hvparams)
2207
  @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
2208
      or we cannot check the node
2209

2210
  """
2211
  nodeinfo = lu.rpc.call_node_info(node_uuids, None, hypervisor_specs)
2212
  for node_uuid in node_uuids:
2213
    info = nodeinfo[node_uuid]
2214
    node_name = lu.cfg.GetNodeName(node_uuid)
2215
    info.Raise("Cannot get current information from node %s" % node_name,
2216
               prereq=True, ecode=errors.ECODE_ENVIRON)
2217
    (_, _, (hv_info, )) = info.payload
2218
    num_cpus = hv_info.get("cpu_total", None)
2219
    if not isinstance(num_cpus, int):
2220
      raise errors.OpPrereqError("Can't compute the number of physical CPUs"
2221
                                 " on node %s, result was '%s'" %
2222
                                 (node_name, num_cpus), errors.ECODE_ENVIRON)
2223
    if requested > num_cpus:
2224
      raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
2225
                                 "required" % (node_name, num_cpus, requested),
2226
                                 errors.ECODE_NORES)
2227

    
2228

    
2229
def GetItemFromContainer(identifier, kind, container):
2230
  """Return the item refered by the identifier.
2231

2232
  @type identifier: string
2233
  @param identifier: Item index or name or UUID
2234
  @type kind: string
2235
  @param kind: One-word item description
2236
  @type container: list
2237
  @param container: Container to get the item from
2238

2239
  """
2240
  # Index
2241
  try:
2242
    idx = int(identifier)
2243
    if idx == -1:
2244
      # Append
2245
      absidx = len(container) - 1
2246
    elif idx < 0:
2247
      raise IndexError("Not accepting negative indices other than -1")
2248
    elif idx > len(container):
2249
      raise IndexError("Got %s index %s, but there are only %s" %
2250
                       (kind, idx, len(container)))
2251
    else:
2252
      absidx = idx
2253
    return (absidx, container[idx])
2254
  except ValueError:
2255
    pass
2256

    
2257
  for idx, item in enumerate(container):
2258
    if item.uuid == identifier or item.name == identifier:
2259
      return (idx, item)
2260

    
2261
  raise errors.OpPrereqError("Cannot find %s with identifier %s" %
2262
                             (kind, identifier), errors.ECODE_NOENT)
2263

    
2264

    
2265
def _ApplyContainerMods(kind, container, chgdesc, mods,
2266
                        create_fn, modify_fn, remove_fn,
2267
                        post_add_fn=None):
2268
  """Applies descriptions in C{mods} to C{container}.
2269

2270
  @type kind: string
2271
  @param kind: One-word item description
2272
  @type container: list
2273
  @param container: Container to modify
2274
  @type chgdesc: None or list
2275
  @param chgdesc: List of applied changes
2276
  @type mods: list
2277
  @param mods: Modifications as returned by L{_PrepareContainerMods}
2278
  @type create_fn: callable
2279
  @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
2280
    receives absolute item index, parameters and private data object as added
2281
    by L{_PrepareContainerMods}, returns tuple containing new item and changes
2282
    as list
2283
  @type modify_fn: callable
2284
  @param modify_fn: Callback for modifying an existing item
2285
    (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
2286
    and private data object as added by L{_PrepareContainerMods}, returns
2287
    changes as list
2288
  @type remove_fn: callable
2289
  @param remove_fn: Callback on removing item; receives absolute item index,
2290
    item and private data object as added by L{_PrepareContainerMods}
2291
  @type post_add_fn: callable
2292
  @param post_add_fn: Callable for post-processing a newly created item after
2293
    it has been put into the container. It receives the index of the new item
2294
    and the new item as parameters.
2295

2296
  """
2297
  for (op, identifier, params, private) in mods:
2298
    changes = None
2299

    
2300
    if op == constants.DDM_ADD:
2301
      # Calculate where item will be added
2302
      # When adding an item, identifier can only be an index
2303
      try:
2304
        idx = int(identifier)
2305
      except ValueError:
2306
        raise errors.OpPrereqError("Only possitive integer or -1 is accepted as"
2307
                                   " identifier for %s" % constants.DDM_ADD,
2308
                                   errors.ECODE_INVAL)
2309
      if idx == -1:
2310
        addidx = len(container)
2311
      else:
2312
        if idx < 0:
2313
          raise IndexError("Not accepting negative indices other than -1")
2314
        elif idx > len(container):
2315
          raise IndexError("Got %s index %s, but there are only %s" %
2316
                           (kind, idx, len(container)))
2317
        addidx = idx
2318

    
2319
      if create_fn is None:
2320
        item = params
2321
      else:
2322
        (item, changes) = create_fn(addidx, params, private)
2323

    
2324
      if idx == -1:
2325
        container.append(item)
2326
      else:
2327
        assert idx >= 0
2328
        assert idx <= len(container)
2329
        # list.insert does so before the specified index
2330
        container.insert(idx, item)
2331

    
2332
      if post_add_fn is not None:
2333
        post_add_fn(addidx, item)
2334

    
2335
    else:
2336
      # Retrieve existing item
2337
      (absidx, item) = GetItemFromContainer(identifier, kind, container)
2338

    
2339
      if op == constants.DDM_REMOVE:
2340
        assert not params
2341

    
2342
        changes = [("%s/%s" % (kind, absidx), "remove")]
2343

    
2344
        if remove_fn is not None:
2345
          msg = remove_fn(absidx, item, private)
2346
          if msg:
2347
            changes.append(("%s/%s" % (kind, absidx), msg))
2348

    
2349
        assert container[absidx] == item
2350
        del container[absidx]
2351
      elif op == constants.DDM_MODIFY:
2352
        if modify_fn is not None:
2353
          changes = modify_fn(absidx, item, params, private)
2354
      else:
2355
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2356

    
2357
    assert _TApplyContModsCbChanges(changes)
2358

    
2359
    if not (chgdesc is None or changes is None):
2360
      chgdesc.extend(changes)
2361

    
2362

    
2363
def _UpdateIvNames(base_index, disks):
2364
  """Updates the C{iv_name} attribute of disks.
2365

2366
  @type disks: list of L{objects.Disk}
2367

2368
  """
2369
  for (idx, disk) in enumerate(disks):
2370
    disk.iv_name = "disk/%s" % (base_index + idx, )
2371

    
2372

    
2373
class LUInstanceSetParams(LogicalUnit):
2374
  """Modifies an instances's parameters.
2375

2376
  """
2377
  HPATH = "instance-modify"
2378
  HTYPE = constants.HTYPE_INSTANCE
2379
  REQ_BGL = False
2380

    
2381
  @staticmethod
2382
  def _UpgradeDiskNicMods(kind, mods, verify_fn):
2383
    assert ht.TList(mods)
2384
    assert not mods or len(mods[0]) in (2, 3)
2385

    
2386
    if mods and len(mods[0]) == 2:
2387
      result = []
2388

    
2389
      addremove = 0
2390
      for op, params in mods:
2391
        if op in (constants.DDM_ADD, constants.DDM_REMOVE):
2392
          result.append((op, -1, params))
2393
          addremove += 1
2394

    
2395
          if addremove > 1:
2396
            raise errors.OpPrereqError("Only one %s add or remove operation is"
2397
                                       " supported at a time" % kind,
2398
                                       errors.ECODE_INVAL)
2399
        else:
2400
          result.append((constants.DDM_MODIFY, op, params))
2401

    
2402
      assert verify_fn(result)
2403
    else:
2404
      result = mods
2405

    
2406
    return result
2407

    
2408
  @staticmethod
2409
  def _CheckMods(kind, mods, key_types, item_fn):
2410
    """Ensures requested disk/NIC modifications are valid.
2411

2412
    """
2413
    for (op, _, params) in mods:
2414
      assert ht.TDict(params)
2415

    
2416
      # If 'key_types' is an empty dict, we assume we have an
2417
      # 'ext' template and thus do not ForceDictType
2418
      if key_types:
2419
        utils.ForceDictType(params, key_types)
2420

    
2421
      if op == constants.DDM_REMOVE:
2422
        if params:
2423
          raise errors.OpPrereqError("No settings should be passed when"
2424
                                     " removing a %s" % kind,
2425
                                     errors.ECODE_INVAL)
2426
      elif op in (constants.DDM_ADD, constants.DDM_MODIFY):
2427
        item_fn(op, params)
2428
      else:
2429
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2430

    
2431
  def _VerifyDiskModification(self, op, params, excl_stor):
2432
    """Verifies a disk modification.
2433

2434
    """
2435
    if op == constants.DDM_ADD:
2436
      mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
2437
      if mode not in constants.DISK_ACCESS_SET:
2438
        raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
2439
                                   errors.ECODE_INVAL)
2440

    
2441
      size = params.get(constants.IDISK_SIZE, None)
2442
      if size is None:
2443
        raise errors.OpPrereqError("Required disk parameter '%s' missing" %
2444
                                   constants.IDISK_SIZE, errors.ECODE_INVAL)
2445
      size = int(size)
2446

    
2447
      params[constants.IDISK_SIZE] = size
2448
      name = params.get(constants.IDISK_NAME, None)
2449
      if name is not None and name.lower() == constants.VALUE_NONE:
2450
        params[constants.IDISK_NAME] = None
2451

    
2452
      CheckSpindlesExclusiveStorage(params, excl_stor, True)
2453

    
2454
    elif op == constants.DDM_MODIFY:
2455
      if constants.IDISK_SIZE in params:
2456
        raise errors.OpPrereqError("Disk size change not possible, use"
2457
                                   " grow-disk", errors.ECODE_INVAL)
2458

    
2459
      # Disk modification supports changing only the disk name and mode.
2460
      # Changing arbitrary parameters is allowed only for ext disk template",
2461
      if self.instance.disk_template != constants.DT_EXT:
2462
        utils.ForceDictType(params, constants.MODIFIABLE_IDISK_PARAMS_TYPES)
2463

    
2464
      name = params.get(constants.IDISK_NAME, None)
2465
      if name is not None and name.lower() == constants.VALUE_NONE:
2466
        params[constants.IDISK_NAME] = None
2467

    
2468
  @staticmethod
2469
  def _VerifyNicModification(op, params):
2470
    """Verifies a network interface modification.
2471

2472
    """
2473
    if op in (constants.DDM_ADD, constants.DDM_MODIFY):
2474
      ip = params.get(constants.INIC_IP, None)
2475
      name = params.get(constants.INIC_NAME, None)
2476
      req_net = params.get(constants.INIC_NETWORK, None)
2477
      link = params.get(constants.NIC_LINK, None)
2478
      mode = params.get(constants.NIC_MODE, None)
2479
      if name is not None and name.lower() == constants.VALUE_NONE:
2480
        params[constants.INIC_NAME] = None
2481
      if req_net is not None:
2482
        if req_net.lower() == constants.VALUE_NONE:
2483
          params[constants.INIC_NETWORK] = None
2484
          req_net = None
2485
        elif link is not None or mode is not None:
2486
          raise errors.OpPrereqError("If network is given"
2487
                                     " mode or link should not",
2488
                                     errors.ECODE_INVAL)
2489

    
2490
      if op == constants.DDM_ADD:
2491
        macaddr = params.get(constants.INIC_MAC, None)
2492
        if macaddr is None:
2493
          params[constants.INIC_MAC] = constants.VALUE_AUTO
2494

    
2495
      if ip is not None:
2496
        if ip.lower() == constants.VALUE_NONE:
2497
          params[constants.INIC_IP] = None
2498
        else:
2499
          if ip.lower() == constants.NIC_IP_POOL:
2500
            if op == constants.DDM_ADD and req_net is None:
2501
              raise errors.OpPrereqError("If ip=pool, parameter network"
2502
                                         " cannot be none",
2503
                                         errors.ECODE_INVAL)
2504
          else:
2505
            if not netutils.IPAddress.IsValid(ip):
2506
              raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
2507
                                         errors.ECODE_INVAL)
2508

    
2509
      if constants.INIC_MAC in params:
2510
        macaddr = params[constants.INIC_MAC]
2511
        if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2512
          macaddr = utils.NormalizeAndValidateMac(macaddr)
2513

    
2514
        if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
2515
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
2516
                                     " modifying an existing NIC",
2517
                                     errors.ECODE_INVAL)
2518

    
2519
  def CheckArguments(self):
    if not (self.op.nics or self.op.disks or self.op.disk_template or
            self.op.hvparams or self.op.beparams or self.op.os_name or
            self.op.osparams or self.op.offline is not None or
            self.op.runtime_mem or self.op.pnode or self.op.osparams_private or
            self.op.instance_communication is not None):
      raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)

    if self.op.hvparams:
      CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
                           "hypervisor", "instance", "cluster")

    self.op.disks = self._UpgradeDiskNicMods(
      "disk", self.op.disks, ht.TSetParamsMods(ht.TIDiskParams))
    self.op.nics = self._UpgradeDiskNicMods(
      "NIC", self.op.nics, ht.TSetParamsMods(ht.TINicParams))

    if self.op.disks and self.op.disk_template is not None:
      raise errors.OpPrereqError("Disk template conversion and other disk"
                                 " changes not supported at the same time",
                                 errors.ECODE_INVAL)

    if (self.op.disk_template and
        self.op.disk_template in constants.DTS_INT_MIRROR and
        self.op.remote_node is None):
      raise errors.OpPrereqError("Changing the disk template to a mirrored"
                                 " one requires specifying a secondary node",
                                 errors.ECODE_INVAL)

    # Check NIC modifications
    self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
                    self._VerifyNicModification)

    if self.op.pnode:
      (self.op.pnode_uuid, self.op.pnode) = \
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODEGROUP] = []
    # Can't even acquire node locks in shared mode as upcoming changes in
    # Ganeti 2.6 will start to modify the node object on disk conversion
    self.needed_locks[locking.LEVEL_NODE] = []
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
    # Lock the node group to look up the ipolicy
    self.share_locks[locking.LEVEL_NODEGROUP] = 1

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
      # Acquire locks for the instance's nodegroups optimistically. Needs
      # to be verified in CheckPrereq
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
        self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
    elif level == locking.LEVEL_NODE:
      self._LockInstancesNodes()
      if self.op.disk_template and self.op.remote_node:
        (self.op.remote_node_uuid, self.op.remote_node) = \
          ExpandNodeUuidAndName(self.cfg, self.op.remote_node_uuid,
                                self.op.remote_node)
        self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node_uuid)
    elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

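  # Locking summary (descriptive note): the instance lock is taken in
  # ExpandNames; node-group locks are acquired optimistically and re-checked
  # in CheckPrereq; node and node-resource locks are only extended with the
  # requested secondary node when a conversion to a mirrored disk template
  # was asked for.
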
  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, primary and secondaries.

    """
    args = {}
    if constants.BE_MINMEM in self.be_new:
      args["minmem"] = self.be_new[constants.BE_MINMEM]
    if constants.BE_MAXMEM in self.be_new:
      args["maxmem"] = self.be_new[constants.BE_MAXMEM]
    if constants.BE_VCPUS in self.be_new:
      args["vcpus"] = self.be_new[constants.BE_VCPUS]
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
    # information at all.

    if self._new_nics is not None:
      nics = []

      for nic in self._new_nics:
        n = copy.deepcopy(nic)
        nicparams = self.cluster.SimpleFillNIC(n.nicparams)
        n.nicparams = nicparams
        nics.append(NICToTuple(self, n))

      args["nics"] = nics

    env = BuildInstanceHookEnvByObject(self, self.instance, override=args)
    if self.op.disk_template:
      env["NEW_DISK_TEMPLATE"] = self.op.disk_template
    if self.op.runtime_mem:
      env["RUNTIME_MEMORY"] = self.op.runtime_mem

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)

  def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
                              old_params, cluster, pnode_uuid):

    update_params_dict = dict([(key, params[key])
                               for key in constants.NICS_PARAMETERS
                               if key in params])

    req_link = update_params_dict.get(constants.NIC_LINK, None)
    req_mode = update_params_dict.get(constants.NIC_MODE, None)

    new_net_uuid = None
    new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid)
    if new_net_uuid_or_name:
      new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name)
      new_net_obj = self.cfg.GetNetwork(new_net_uuid)

    if old_net_uuid:
      old_net_obj = self.cfg.GetNetwork(old_net_uuid)

    if new_net_uuid:
      netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode_uuid)
      if not netparams:
        raise errors.OpPrereqError("No netparams found for the network"
                                   " %s, probably not connected" %
                                   new_net_obj.name, errors.ECODE_INVAL)
      new_params = dict(netparams)
    else:
      new_params = GetUpdatedParams(old_params, update_params_dict)

    utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)

    new_filled_params = cluster.SimpleFillNIC(new_params)
    objects.NIC.CheckParameterSyntax(new_filled_params)

    new_mode = new_filled_params[constants.NIC_MODE]
    if new_mode == constants.NIC_MODE_BRIDGED:
      bridge = new_filled_params[constants.NIC_LINK]
      msg = self.rpc.call_bridges_exist(pnode_uuid, [bridge]).fail_msg
      if msg:
        msg = "Error checking bridges on node '%s': %s" % \
                (self.cfg.GetNodeName(pnode_uuid), msg)
        if self.op.force:
          self.warn.append(msg)
        else:
          raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)

    elif new_mode == constants.NIC_MODE_ROUTED:
      ip = params.get(constants.INIC_IP, old_ip)
      if ip is None:
        raise errors.OpPrereqError("Cannot set the NIC IP address to None"
                                   " on a routed NIC", errors.ECODE_INVAL)

    elif new_mode == constants.NIC_MODE_OVS:
      # TODO: check OVS link
      self.LogInfo("OVS links are currently not checked for correctness")

    if constants.INIC_MAC in params:
      mac = params[constants.INIC_MAC]
      if mac is None:
        raise errors.OpPrereqError("Cannot unset the NIC MAC address",
                                   errors.ECODE_INVAL)
      elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        # otherwise generate the MAC address
        params[constants.INIC_MAC] = \
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
      else:
        # or validate/reserve the current one
        try:
          self.cfg.ReserveMAC(mac, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("MAC address '%s' already in use"
                                     " in cluster" % mac,
                                     errors.ECODE_NOTUNIQUE)
    elif new_net_uuid != old_net_uuid:

      def get_net_prefix(net_uuid):
        mac_prefix = None
        if net_uuid:
          nobj = self.cfg.GetNetwork(net_uuid)
          mac_prefix = nobj.mac_prefix

        return mac_prefix

      new_prefix = get_net_prefix(new_net_uuid)
      old_prefix = get_net_prefix(old_net_uuid)
      if old_prefix != new_prefix:
        params[constants.INIC_MAC] = \
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())

    # if there is a change in (ip, network) tuple
    new_ip = params.get(constants.INIC_IP, old_ip)
    if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
      if new_ip:
        # if IP is pool then require a network and generate one IP
        if new_ip.lower() == constants.NIC_IP_POOL:
          if new_net_uuid:
            try:
              new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("Unable to get a free IP"
                                         " from the address pool",
                                         errors.ECODE_STATE)
            self.LogInfo("Chose IP %s from network %s",
                         new_ip,
                         new_net_obj.name)
            params[constants.INIC_IP] = new_ip
          else:
            raise errors.OpPrereqError("ip=pool, but no network found",
                                       errors.ECODE_INVAL)
        # Reserve the new IP in the new network, if any
        elif new_net_uuid:
          try:
            self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId(),
                               check=self.op.conflicts_check)
            self.LogInfo("Reserving IP %s in network %s",
                         new_ip, new_net_obj.name)
          except errors.ReservationError:
            raise errors.OpPrereqError("IP %s not available in network %s" %
                                       (new_ip, new_net_obj.name),
                                       errors.ECODE_NOTUNIQUE)
        # new network is None so check if new IP is a conflicting IP
        elif self.op.conflicts_check:
          _CheckForConflictingIp(self, new_ip, pnode_uuid)

      # release old IP if old network is not None
      if old_ip and old_net_uuid:
        try:
          self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId())
        except errors.AddressPoolError:
          logging.warning("Release IP %s not contained in network %s",
                          old_ip, old_net_obj.name)

    # there are no changes in (ip, network) tuple and old network is not None
    elif (old_net_uuid is not None and
          (req_link is not None or req_mode is not None)):
      raise errors.OpPrereqError("Not allowed to change link or mode of"
                                 " a NIC that is connected to a network",
                                 errors.ECODE_INVAL)

    private.params = new_params
    private.filled = new_filled_params

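  # The computed values are stashed on the per-modification private object:
  # private.params holds the raw (unfilled) NIC parameters and private.filled
  # the cluster-filled version, which _CreateNewNic and _ApplyNicMods later
  # copy onto the NIC object.
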
  def _PreCheckDiskTemplate(self, pnode_info):
    """CheckPrereq checks related to a new disk template."""
    # Arguments are passed to avoid configuration lookups
    pnode_uuid = self.instance.primary_node
    if self.instance.disk_template == self.op.disk_template:
      raise errors.OpPrereqError("Instance already has disk template %s" %
                                 self.instance.disk_template,
                                 errors.ECODE_INVAL)

    if not self.cluster.IsDiskTemplateEnabled(self.op.disk_template):
      raise errors.OpPrereqError("Disk template '%s' is not enabled for this"
                                 " cluster." % self.op.disk_template)

    if (self.instance.disk_template,
        self.op.disk_template) not in self._DISK_CONVERSIONS:
      raise errors.OpPrereqError("Unsupported disk template conversion from"
                                 " %s to %s" % (self.instance.disk_template,
                                                self.op.disk_template),
                                 errors.ECODE_INVAL)
    CheckInstanceState(self, self.instance, INSTANCE_DOWN,
                       msg="cannot change disk template")
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.remote_node_uuid == pnode_uuid:
        raise errors.OpPrereqError("Given new secondary node %s is the same"
                                   " as the primary node of the instance" %
                                   self.op.remote_node, errors.ECODE_STATE)
      CheckNodeOnline(self, self.op.remote_node_uuid)
      CheckNodeNotDrained(self, self.op.remote_node_uuid)
      # FIXME: here we assume that the old instance type is DT_PLAIN
      assert self.instance.disk_template == constants.DT_PLAIN
      disks = [{constants.IDISK_SIZE: d.size,
                constants.IDISK_VG: d.logical_id[0]}
               for d in self.instance.disks]
      required = ComputeDiskSizePerVG(self.op.disk_template, disks)
      CheckNodesFreeDiskPerVG(self, [self.op.remote_node_uuid], required)

      snode_info = self.cfg.GetNodeInfo(self.op.remote_node_uuid)
      snode_group = self.cfg.GetNodeGroup(snode_info.group)
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
                                                              snode_group)
      CheckTargetNodeIPolicy(self, ipolicy, self.instance, snode_info, self.cfg,
                             ignore=self.op.ignore_ipolicy)
      if pnode_info.group != snode_info.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    if not self.op.disk_template in constants.DTS_EXCL_STORAGE:
      # Make sure none of the nodes require exclusive storage
      nodes = [pnode_info]
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        assert snode_info
        nodes.append(snode_info)
      has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
      if compat.any(map(has_es, nodes)):
        errmsg = ("Cannot convert disk template from %s to %s when exclusive"
                  " storage is enabled" % (self.instance.disk_template,
                                           self.op.disk_template))
        raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)

  def _PreCheckDisks(self, ispec):
    """CheckPrereq checks related to disk changes.

    @type ispec: dict
    @param ispec: instance specs to be updated with the new disks

    """
    self.diskparams = self.cfg.GetInstanceDiskParams(self.instance)

    excl_stor = compat.any(
      rpc.GetExclusiveStorageForNodes(self.cfg,
                                      self.instance.all_nodes).values()
      )

    # Check disk modifications. This is done here and not in CheckArguments
    # (as with NICs), because we need to know the instance's disk template
    ver_fn = lambda op, par: self._VerifyDiskModification(op, par, excl_stor)
    if self.instance.disk_template == constants.DT_EXT:
      self._CheckMods("disk", self.op.disks, {}, ver_fn)
    else:
      self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
                      ver_fn)

    self.diskmod = _PrepareContainerMods(self.op.disks, None)

    # Check the validity of the `provider' parameter
    if self.instance.disk_template in constants.DT_EXT:
      for mod in self.diskmod:
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
        if mod[0] == constants.DDM_ADD:
          if ext_provider is None:
            raise errors.OpPrereqError("Instance template is '%s' and parameter"
                                       " '%s' missing, during disk add" %
                                       (constants.DT_EXT,
                                        constants.IDISK_PROVIDER),
                                       errors.ECODE_NOENT)
        elif mod[0] == constants.DDM_MODIFY:
          if ext_provider:
            raise errors.OpPrereqError("Parameter '%s' is invalid during disk"
                                       " modification" %
                                       constants.IDISK_PROVIDER,
                                       errors.ECODE_INVAL)
    else:
      for mod in self.diskmod:
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
        if ext_provider is not None:
          raise errors.OpPrereqError("Parameter '%s' is only valid for"
                                     " instances of type '%s'" %
                                     (constants.IDISK_PROVIDER,
                                      constants.DT_EXT),
                                     errors.ECODE_INVAL)

    if not self.op.wait_for_sync and self.instance.disks_active:
      for mod in self.diskmod:
        if mod[0] == constants.DDM_ADD:
          raise errors.OpPrereqError("Can't add a disk to an instance with"
                                     " activated disks and"
                                     " --no-wait-for-sync given.",
                                     errors.ECODE_INVAL)

    if self.op.disks and self.instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Disk operations not supported for"
                                 " diskless instances", errors.ECODE_INVAL)

    def _PrepareDiskMod(_, disk, params, __):
      disk.name = params.get(constants.IDISK_NAME, None)

    # Verify disk changes (operating on a copy)
    disks = copy.deepcopy(self.instance.disks)
    _ApplyContainerMods("disk", disks, None, self.diskmod, None,
                        _PrepareDiskMod, None)
    utils.ValidateDeviceNames("disk", disks)
    if len(disks) > constants.MAX_DISKS:
      raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
                                 " more" % constants.MAX_DISKS,
                                 errors.ECODE_STATE)
    disk_sizes = [disk.size for disk in self.instance.disks]
    disk_sizes.extend(params["size"] for (op, idx, params, private) in
                      self.diskmod if op == constants.DDM_ADD)
    ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
    ispec[constants.ISPEC_DISK_SIZE] = disk_sizes

    if self.op.offline is not None and self.op.offline:
      CheckInstanceState(self, self.instance, CAN_CHANGE_INSTANCE_OFFLINE,
                         msg="can't change to offline")

  @staticmethod
  def _InstanceCommunicationDDM(cfg, instance_communication, instance):
    """Create a NIC mod that adds or removes the instance
    communication NIC to/from a running instance.

    The NICS are dynamically created using the Dynamic Device
    Modification (DDM).  This function produces a NIC modification
    (mod) that inserts an additional NIC meant for instance
    communication in or removes an existing instance communication NIC
    from a running instance, using DDM.

    @type cfg: L{config.ConfigWriter}
    @param cfg: cluster configuration

    @type instance_communication: boolean
    @param instance_communication: whether instance communication is
                                   enabled or disabled

    @type instance: L{objects.Instance}
    @param instance: instance to which the NIC mod will be applied

    @rtype: (L{constants.DDM_ADD}, -1, parameters) or
            (L{constants.DDM_REMOVE}, -1, parameters) or
            L{None}
    @return: DDM mod containing an action to add or remove the NIC, or
             None if nothing needs to be done

    """
    nic_name = _ComputeInstanceCommunicationNIC(instance.name)

    instance_communication_nic = None

    for nic in instance.nics:
      if nic.name == nic_name:
        instance_communication_nic = nic
        break

    if instance_communication and not instance_communication_nic:
      action = constants.DDM_ADD
      params = {constants.INIC_NAME: nic_name,
                constants.INIC_MAC: constants.VALUE_GENERATE,
                constants.INIC_IP: constants.NIC_IP_POOL,
                constants.INIC_NETWORK:
                  cfg.GetInstanceCommunicationNetwork()}
    elif not instance_communication and instance_communication_nic:
      action = constants.DDM_REMOVE
      params = None
    else:
      action = None
      params = None

    if action is not None:
      return (action, -1, params)
    else:
      return None

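  # Illustrative example: enabling instance communication on an instance that
  # lacks the NIC makes the helper above return something like
  #   (constants.DDM_ADD, -1,
  #    {constants.INIC_NAME: <computed name>,
  #     constants.INIC_MAC: constants.VALUE_GENERATE,
  #     constants.INIC_IP: constants.NIC_IP_POOL,
  #     constants.INIC_NETWORK: cfg.GetInstanceCommunicationNetwork()})
  # and (constants.DDM_REMOVE, -1, None) for the opposite transition; the mod
  # is simply appended to self.op.nics in CheckPrereq below.
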
  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the instance list against the existing names.

    """
    assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    self.cluster = self.cfg.GetClusterInfo()
    cluster_hvparams = self.cluster.hvparams[self.instance.hypervisor]

    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    pnode_uuid = self.instance.primary_node

    self.warn = []

    if (self.op.pnode_uuid is not None and self.op.pnode_uuid != pnode_uuid and
        not self.op.force):
      # verify that the instance is not up
      instance_info = self.rpc.call_instance_info(
          pnode_uuid, self.instance.name, self.instance.hypervisor,
          cluster_hvparams)
      if instance_info.fail_msg:
        self.warn.append("Can't get instance runtime information: %s" %
                         instance_info.fail_msg)
      elif instance_info.payload:
        raise errors.OpPrereqError("Instance is still running on %s" %
                                   self.cfg.GetNodeName(pnode_uuid),
                                   errors.ECODE_STATE)

    assert pnode_uuid in self.owned_locks(locking.LEVEL_NODE)
    node_uuids = list(self.instance.all_nodes)
    pnode_info = self.cfg.GetNodeInfo(pnode_uuid)

    #_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
    assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
    group_info = self.cfg.GetNodeGroup(pnode_info.group)

    # dictionary with instance information after the modification
    ispec = {}

    if self.op.hotplug or self.op.hotplug_if_possible:
      result = self.rpc.call_hotplug_supported(self.instance.primary_node,
                                               self.instance)
      if result.fail_msg:
        if self.op.hotplug:
          result.Raise("Hotplug is not possible: %s" % result.fail_msg,
                       prereq=True)
        else:
          self.LogWarning(result.fail_msg)
          self.op.hotplug = False
          self.LogInfo("Modification will take place without hotplugging.")
      else:
        self.op.hotplug = True

    # Prepare NIC modifications
    # add or remove NIC for instance communication
    if self.op.instance_communication is not None:
      mod = self._InstanceCommunicationDDM(self.cfg,
                                           self.op.instance_communication,
                                           self.instance)
      if mod is not None:
        self.op.nics.append(mod)

    self.nicmod = _PrepareContainerMods(self.op.nics, _InstNicModPrivate)

    # OS change
    if self.op.os_name and not self.op.force:
      CheckNodeHasOS(self, self.instance.primary_node, self.op.os_name,
                     self.op.force_variant)
      instance_os = self.op.os_name
    else:
      instance_os = self.instance.os

    assert not (self.op.disk_template and self.op.disks), \
      "Can't modify disk template and apply disk changes at the same time"

    if self.op.disk_template:
      self._PreCheckDiskTemplate(pnode_info)

    self._PreCheckDisks(ispec)

    # hvparams processing
    if self.op.hvparams:
      hv_type = self.instance.hypervisor
      i_hvdict = GetUpdatedParams(self.instance.hvparams, self.op.hvparams)
      utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
      hv_new = self.cluster.SimpleFillHV(hv_type, self.instance.os, i_hvdict)

      # local check
      hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
      CheckHVParams(self, node_uuids, self.instance.hypervisor, hv_new)
      self.hv_proposed = self.hv_new = hv_new # the new actual values
      self.hv_inst = i_hvdict # the new dict (without defaults)
    else:
      self.hv_proposed = self.cluster.SimpleFillHV(self.instance.hypervisor,
                                                   self.instance.os,
                                                   self.instance.hvparams)
      self.hv_new = self.hv_inst = {}

    # beparams processing
    if self.op.beparams:
      i_bedict = GetUpdatedParams(self.instance.beparams, self.op.beparams,
                                  use_none=True)
      objects.UpgradeBeParams(i_bedict)
      utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
      be_new = self.cluster.SimpleFillBE(i_bedict)
      self.be_proposed = self.be_new = be_new # the new actual values
      self.be_inst = i_bedict # the new dict (without defaults)
    else:
      self.be_new = self.be_inst = {}
      self.be_proposed = self.cluster.SimpleFillBE(self.instance.beparams)
    be_old = self.cluster.FillBE(self.instance)

    # CPU param validation -- checking every time a parameter is
    # changed to cover all cases where either CPU mask or vcpus have
    # changed
    if (constants.BE_VCPUS in self.be_proposed and
        constants.HV_CPU_MASK in self.hv_proposed):
      cpu_list = \
        utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
      # Verify mask is consistent with number of vCPUs. Can skip this
      # test if only 1 entry in the CPU mask, which means same mask
      # is applied to all vCPUs.
      if (len(cpu_list) > 1 and
          len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
        raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
                                   " CPU mask [%s]" %
                                   (self.be_proposed[constants.BE_VCPUS],
                                    self.hv_proposed[constants.HV_CPU_MASK]),
                                   errors.ECODE_INVAL)

      # Only perform this test if a new CPU mask is given
      if constants.HV_CPU_MASK in self.hv_new:
        # Calculate the largest CPU number requested
        max_requested_cpu = max(map(max, cpu_list))
        # Check that all of the instance's nodes have enough physical CPUs to
        # satisfy the requested CPU mask
        hvspecs = [(self.instance.hypervisor,
                    self.cfg.GetClusterInfo()
                      .hvparams[self.instance.hypervisor])]
        _CheckNodesPhysicalCPUs(self, self.instance.all_nodes,
                                max_requested_cpu + 1,
                                hvspecs)

    # osparams processing
    if self.op.osparams or self.op.osparams_private:
      public_parms = self.op.osparams or {}
      private_parms = self.op.osparams_private or {}
      dupe_keys = utils.GetRepeatedKeys(public_parms, private_parms)

      if dupe_keys:
        raise errors.OpPrereqError("OS parameters repeated multiple times: %s" %
                                   utils.CommaJoin(dupe_keys))

      self.os_inst = GetUpdatedParams(self.instance.osparams,
                                      public_parms)
      self.os_inst_private = GetUpdatedParams(self.instance.osparams_private,
                                              private_parms)

      CheckOSParams(self, True, node_uuids, instance_os,
                    objects.FillDict(self.os_inst,
                                     self.os_inst_private))

    else:
      self.os_inst = {}
      self.os_inst_private = {}

    #TODO(dynmem): do the appropriate check involving MINMEM
    if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
        be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
      mem_check_list = [pnode_uuid]
      if be_new[constants.BE_AUTO_BALANCE]:
        # either we changed auto_balance to yes or it was from before
        mem_check_list.extend(
          self.cfg.GetInstanceSecondaryNodes(self.instance))
      instance_info = self.rpc.call_instance_info(
          pnode_uuid, self.instance.name, self.instance.hypervisor,
          cluster_hvparams)
      hvspecs = [(self.instance.hypervisor,
                  cluster_hvparams)]
      nodeinfo = self.rpc.call_node_info(mem_check_list, None,
                                         hvspecs)
      pninfo = nodeinfo[pnode_uuid]
      msg = pninfo.fail_msg
      if msg:
        # Assume the primary node is unreachable and go ahead
        self.warn.append("Can't get info from primary node %s: %s" %
                         (self.cfg.GetNodeName(pnode_uuid), msg))
      else:
        (_, _, (pnhvinfo, )) = pninfo.payload
        if not isinstance(pnhvinfo.get("memory_free", None), int):
          self.warn.append("Node data from primary node %s doesn't contain"
                           " free memory information" %
                           self.cfg.GetNodeName(pnode_uuid))
        elif instance_info.fail_msg:
          self.warn.append("Can't get instance runtime information: %s" %
                           instance_info.fail_msg)
        else:
          if instance_info.payload:
            current_mem = int(instance_info.payload["memory"])
          else:
            # Assume instance not running
            # (there is a slight race condition here, but it's not very
            # probable, and we have no other way to check)
            # TODO: Describe race condition
            current_mem = 0
          #TODO(dynmem): do the appropriate check involving MINMEM
          miss_mem = (be_new[constants.BE_MAXMEM] - current_mem -
                      pnhvinfo["memory_free"])
          if miss_mem > 0:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from starting, due to %d MB of memory"
                                       " missing on its primary node" %
                                       miss_mem, errors.ECODE_NORES)

      if be_new[constants.BE_AUTO_BALANCE]:
        secondary_nodes = self.cfg.GetInstanceSecondaryNodes(self.instance)
        for node_uuid, nres in nodeinfo.items():
          if node_uuid not in secondary_nodes:
            continue
          nres.Raise("Can't get info from secondary node %s" %
                     self.cfg.GetNodeName(node_uuid), prereq=True,
                     ecode=errors.ECODE_STATE)
          (_, _, (nhvinfo, )) = nres.payload
          if not isinstance(nhvinfo.get("memory_free", None), int):
            raise errors.OpPrereqError("Secondary node %s didn't return free"
                                       " memory information" %
                                       self.cfg.GetNodeName(node_uuid),
                                       errors.ECODE_STATE)
          #TODO(dynmem): do the appropriate check involving MINMEM
          elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from failover to its secondary node"
                                       " %s, due to not enough memory" %
                                       self.cfg.GetNodeName(node_uuid),
                                       errors.ECODE_STATE)

    if self.op.runtime_mem:
      remote_info = self.rpc.call_instance_info(
         self.instance.primary_node, self.instance.name,
         self.instance.hypervisor,
         cluster_hvparams)
      remote_info.Raise("Error checking node %s" %
                        self.cfg.GetNodeName(self.instance.primary_node))
      if not remote_info.payload: # not running already
        raise errors.OpPrereqError("Instance %s is not running" %
                                   self.instance.name, errors.ECODE_STATE)

      current_memory = remote_info.payload["memory"]
      if (not self.op.force and
           (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
            self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
        raise errors.OpPrereqError("Instance %s must have between %d and"
                                   " %d MB of memory unless --force is"
                                   " given" %
                                   (self.instance.name,
                                    self.be_proposed[constants.BE_MINMEM],
                                    self.be_proposed[constants.BE_MAXMEM]),
                                   errors.ECODE_INVAL)

      delta = self.op.runtime_mem - current_memory
      if delta > 0:
        CheckNodeFreeMemory(
            self, self.instance.primary_node,
            "ballooning memory for instance %s" % self.instance.name, delta,
            self.instance.hypervisor,
            self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])

    # make self.cluster visible in the functions below
    cluster = self.cluster

    def _PrepareNicCreate(_, params, private):
      self._PrepareNicModification(params, private, None, None,
                                   {}, cluster, pnode_uuid)
      return (None, None)

    def _PrepareNicMod(_, nic, params, private):
      self._PrepareNicModification(params, private, nic.ip, nic.network,
                                   nic.nicparams, cluster, pnode_uuid)
      return None

    def _PrepareNicRemove(_, params, __):
      ip = params.ip
      net = params.network
      if net is not None and ip is not None:
        self.cfg.ReleaseIp(net, ip, self.proc.GetECId())

    # Verify NIC changes (operating on copy)
    nics = [nic.Copy() for nic in self.instance.nics]
    _ApplyContainerMods("NIC", nics, None, self.nicmod,
                        _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
    if len(nics) > constants.MAX_NICS:
      raise errors.OpPrereqError("Instance has too many network interfaces"
                                 " (%d), cannot add more" % constants.MAX_NICS,
                                 errors.ECODE_STATE)

    # Pre-compute NIC changes (necessary to use result in hooks)
    self._nic_chgdesc = []
    if self.nicmod:
      # Operate on copies as this is still in prereq
      nics = [nic.Copy() for nic in self.instance.nics]
      _ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
                          self._CreateNewNic, self._ApplyNicMods,
                          self._RemoveNic)
      # Verify that NIC names are unique and valid
      utils.ValidateDeviceNames("NIC", nics)
      self._new_nics = nics
      ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
    else:
      self._new_nics = None
      ispec[constants.ISPEC_NIC_COUNT] = len(self.instance.nics)

    if not self.op.ignore_ipolicy:
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
                                                              group_info)

      # Fill ispec with backend parameters
      ispec[constants.ISPEC_SPINDLE_USE] = \
        self.be_new.get(constants.BE_SPINDLE_USE, None)
      ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS,
                                                         None)

      # Copy ispec to verify parameters with min/max values separately
      if self.op.disk_template:
        new_disk_template = self.op.disk_template
      else:
        new_disk_template = self.instance.disk_template
      ispec_max = ispec.copy()
      ispec_max[constants.ISPEC_MEM_SIZE] = \
        self.be_new.get(constants.BE_MAXMEM, None)
      res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,
                                                     new_disk_template)
      ispec_min = ispec.copy()
      ispec_min[constants.ISPEC_MEM_SIZE] = \
        self.be_new.get(constants.BE_MINMEM, None)
      res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,
                                                     new_disk_template)

      if (res_max or res_min):
        # FIXME: Improve error message by including information about whether
        # the upper or lower limit of the parameter fails the ipolicy.
        msg = ("Instance allocation to group %s (%s) violates policy: %s" %
               (group_info, group_info.name,
                utils.CommaJoin(set(res_max + res_min))))
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

  def _ConvertPlainToDrbd(self, feedback_fn):
    """Converts an instance from plain to drbd.

    """
    feedback_fn("Converting template to drbd")
    pnode_uuid = self.instance.primary_node
    snode_uuid = self.op.remote_node_uuid

    assert self.instance.disk_template == constants.DT_PLAIN

    # create a fake disk info for _GenerateDiskTemplate
    disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
                  constants.IDISK_VG: d.logical_id[0],
                  constants.IDISK_NAME: d.name}
                 for d in self.instance.disks]
    new_disks = GenerateDiskTemplate(self, self.op.disk_template,
                                     self.instance.uuid, pnode_uuid,
                                     [snode_uuid], disk_info, None, None, 0,
                                     feedback_fn, self.diskparams)
    anno_disks = rpc.AnnotateDiskParams(new_disks, self.diskparams)
    p_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, pnode_uuid)
    s_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, snode_uuid)
    info = GetInstanceInfoText(self.instance)
    feedback_fn("Creating additional volumes...")
    # first, create the missing data and meta devices
    for disk in anno_disks:
      # unfortunately this is... not too nice
      CreateSingleBlockDev(self, pnode_uuid, self.instance, disk.children[1],
                           info, True, p_excl_stor)
      for child in disk.children:
        CreateSingleBlockDev(self, snode_uuid, self.instance, child, info, True,
                             s_excl_stor)
    # at this stage, all new LVs have been created, we can rename the
    # old ones
    feedback_fn("Renaming original volumes...")
    rename_list = [(o, n.children[0].logical_id)
                   for (o, n) in zip(self.instance.disks, new_disks)]
    result = self.rpc.call_blockdev_rename(pnode_uuid, rename_list)
    result.Raise("Failed to rename original LVs")

    feedback_fn("Initializing DRBD devices...")
    # all child devices are in place, we can now create the DRBD devices
    try:
      for disk in anno_disks:
        for (node_uuid, excl_stor) in [(pnode_uuid, p_excl_stor),
                                       (snode_uuid, s_excl_stor)]:
          f_create = node_uuid == pnode_uuid
          CreateSingleBlockDev(self, node_uuid, self.instance, disk, info,
                               f_create, excl_stor)
    except errors.GenericError, e:
      feedback_fn("Initializing of DRBD devices failed;"
                  " renaming back original volumes...")
      rename_back_list = [(n.children[0], o.logical_id)
                          for (n, o) in zip(new_disks, self.instance.disks)]
      result = self.rpc.call_blockdev_rename(pnode_uuid, rename_back_list)
      result.Raise("Failed to rename LVs back after error %s" % str(e))
      raise

    # at this point, the instance has been modified
    self.instance.disk_template = constants.DT_DRBD8
    self.instance.disks = new_disks
    self.cfg.Update(self.instance, feedback_fn)

    # Release node locks while waiting for sync
    ReleaseLocks(self, locking.LEVEL_NODE)

    # disks are created, waiting for sync
    disk_abort = not WaitForSync(self, self.instance,
                                 oneshot=not self.op.wait_for_sync)
    if disk_abort:
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance, please cleanup manually")

    # Node resource locks will be released by caller

  def _ConvertDrbdToPlain(self, feedback_fn):
    """Converts an instance from drbd to plain.

    """
    secondary_nodes = self.cfg.GetInstanceSecondaryNodes(self.instance)
    assert len(secondary_nodes) == 1
    assert self.instance.disk_template == constants.DT_DRBD8

    pnode_uuid = self.instance.primary_node
    snode_uuid = secondary_nodes[0]
    feedback_fn("Converting template to plain")

    old_disks = AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
    new_disks = [d.children[0] for d in self.instance.disks]

    # copy over size, mode and name
    for parent, child in zip(old_disks, new_disks):
      child.size = parent.size
      child.mode = parent.mode
      child.name = parent.name

    # this is a DRBD disk, return its port to the pool
    # NOTE: this must be done right before the call to cfg.Update!
    for disk in old_disks:
      tcp_port = disk.logical_id[2]
      self.cfg.AddTcpUdpPort(tcp_port)

    # update instance structure
    self.instance.disks = new_disks
    self.instance.disk_template = constants.DT_PLAIN
    _UpdateIvNames(0, self.instance.disks)
    self.cfg.Update(self.instance, feedback_fn)

    # Release locks in case removing disks takes a while
    ReleaseLocks(self, locking.LEVEL_NODE)

    feedback_fn("Removing volumes on the secondary node...")
    for disk in old_disks:
      result = self.rpc.call_blockdev_remove(snode_uuid, (disk, self.instance))
      result.Warn("Could not remove block device %s on node %s,"
                  " continuing anyway" %
                  (disk.iv_name, self.cfg.GetNodeName(snode_uuid)),
                  self.LogWarning)

    feedback_fn("Removing unneeded volumes on the primary node...")
    for idx, disk in enumerate(old_disks):
      meta = disk.children[1]
      result = self.rpc.call_blockdev_remove(pnode_uuid, (meta, self.instance))
      result.Warn("Could not remove metadata for disk %d on node %s,"
                  " continuing anyway" %
                  (idx, self.cfg.GetNodeName(pnode_uuid)),
                  self.LogWarning)

  def _HotplugDevice(self, action, dev_type, device, extra, seq):
    self.LogInfo("Trying to hotplug device...")
    msg = "hotplug:"
    result = self.rpc.call_hotplug_device(self.instance.primary_node,
                                          self.instance, action, dev_type,
                                          (device, self.instance),
                                          extra, seq)
    if result.fail_msg:
      self.LogWarning("Could not hotplug device: %s" % result.fail_msg)
      self.LogInfo("Continuing execution...")
      msg += "failed"
    else:
      self.LogInfo("Hotplug done.")
      msg += "done"
    return msg

  def _CreateNewDisk(self, idx, params, _):
    """Creates a new disk.

    """
    # add a new disk
    if self.instance.disk_template in constants.DTS_FILEBASED:
      (file_driver, file_path) = self.instance.disks[0].logical_id
      file_path = os.path.dirname(file_path)
    else:
      file_driver = file_path = None

    secondary_nodes = self.cfg.GetInstanceSecondaryNodes(self.instance)
    disk = \
      GenerateDiskTemplate(self, self.instance.disk_template,
                           self.instance.uuid, self.instance.primary_node,
                           secondary_nodes, [params], file_path,
                           file_driver, idx, self.Log, self.diskparams)[0]

    new_disks = CreateDisks(self, self.instance, disks=[disk])

    if self.cluster.prealloc_wipe_disks:
      # Wipe new disk
      WipeOrCleanupDisks(self, self.instance,
                         disks=[(idx, disk, 0)],
                         cleanup=new_disks)

    changes = [
      ("disk/%d" % idx,
       "add:size=%s,mode=%s" % (disk.size, disk.mode)),
      ]
    if self.op.hotplug:
      result = self.rpc.call_blockdev_assemble(self.instance.primary_node,
                                               (disk, self.instance),
                                               self.instance.name, True, idx)
      if result.fail_msg:
        changes.append(("disk/%d" % idx, "assemble:failed"))
        self.LogWarning("Can't assemble newly created disk %d: %s",
                        idx, result.fail_msg)
      else:
        _, link_name = result.payload
        msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
                                  constants.HOTPLUG_TARGET_DISK,
                                  disk, link_name, idx)
        changes.append(("disk/%d" % idx, msg))

    return (disk, changes)

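  # The change descriptions assembled above end up in the opcode result, e.g.
  # ("disk/2", "add:size=1024,mode=rw") followed by ("disk/2", "hotplug:done")
  # when hotplugging the new disk succeeded (index, size and mode shown here
  # are only illustrative values).
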
  def _PostAddDisk(self, _, disk):
3514
    if not WaitForSync(self, self.instance, disks=[disk],
3515
                       oneshot=not self.op.wait_for_sync):
3516
      raise errors.OpExecError("Failed to sync disks of %s" %
3517
                               self.instance.name)
3518

    
3519
    # the disk is active at this point, so deactivate it if the instance disks
3520
    # are supposed to be inactive
3521
    if not self.instance.disks_active:
3522
      ShutdownInstanceDisks(self, self.instance, disks=[disk])
3523

    
3524
  def _ModifyDisk(self, idx, disk, params, _):
3525
    """Modifies a disk.
3526

3527
    """
3528
    changes = []
3529
    if constants.IDISK_MODE in params:
3530
      disk.mode = params.get(constants.IDISK_MODE)
3531
      changes.append(("disk.mode/%d" % idx, disk.mode))
3532

    
3533
    if constants.IDISK_NAME in params:
3534
      disk.name = params.get(constants.IDISK_NAME)
3535
      changes.append(("disk.name/%d" % idx, disk.name))
3536

    
3537
    # Modify arbitrary params in case instance template is ext
3538
    for key, value in params.iteritems():
3539
      if (key not in constants.MODIFIABLE_IDISK_PARAMS and
3540
          self.instance.disk_template == constants.DT_EXT):
3541
        # stolen from GetUpdatedParams: default means reset/delete
3542
        if value.lower() == constants.VALUE_DEFAULT:
3543
          try:
3544
            del disk.params[key]
3545
          except KeyError:
3546
            pass
3547
        else:
3548
          disk.params[key] = value
3549
        changes.append(("disk.params:%s/%d" % (key, idx), value))
3550

    
3551
    return changes
3552

    
3553
  def _RemoveDisk(self, idx, root, _):
3554
    """Removes a disk.
3555

3556
    """
3557
    hotmsg = ""
3558
    if self.op.hotplug:
3559
      hotmsg = self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
3560
                                   constants.HOTPLUG_TARGET_DISK,
3561
                                   root, None, idx)
3562
      ShutdownInstanceDisks(self, self.instance, [root])
3563

    
3564
    (anno_disk,) = AnnotateDiskParams(self.instance, [root], self.cfg)
3565
    for node_uuid, disk in anno_disk.ComputeNodeTree(
3566
                             self.instance.primary_node):
3567
      msg = self.rpc.call_blockdev_remove(node_uuid, (disk, self.instance)) \
3568
              .fail_msg
3569
      if msg:
3570
        self.LogWarning("Could not remove disk/%d on node '%s': %s,"
3571
                        " continuing anyway", idx,
3572
                        self.cfg.GetNodeName(node_uuid), msg)
3573

    
3574
    # if this is a DRBD disk, return its port to the pool
3575
    if root.dev_type in constants.DTS_DRBD:
3576
      self.cfg.AddTcpUdpPort(root.logical_id[2])
3577

    
3578
    return hotmsg
3579

    
3580
  def _CreateNewNic(self, idx, params, private):
3581
    """Creates data structure for a new network interface.
3582

3583
    """
3584
    mac = params[constants.INIC_MAC]
3585
    ip = params.get(constants.INIC_IP, None)
3586
    net = params.get(constants.INIC_NETWORK, None)
3587
    name = params.get(constants.INIC_NAME, None)
3588
    net_uuid = self.cfg.LookupNetwork(net)
3589
    #TODO: not private.filled?? can a nic have no nicparams??
3590
    nicparams = private.filled
3591
    nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name,
3592
                       nicparams=nicparams)
3593
    nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
3594

    
3595
    changes = [
3596
      ("nic.%d" % idx,
3597
       "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
3598
       (mac, ip, private.filled[constants.NIC_MODE],
3599
       private.filled[constants.NIC_LINK], net)),
3600
      ]
3601

    
3602
    if self.op.hotplug:
3603
      msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
3604
                                constants.HOTPLUG_TARGET_NIC,
3605
                                nobj, None, idx)
3606
      changes.append(("nic.%d" % idx, msg))
3607

    
3608
    return (nobj, changes)
3609

    
3610
  def _ApplyNicMods(self, idx, nic, params, private):
3611
    """Modifies a network interface.
3612

3613
    """
3614
    changes = []
3615

    
3616
    for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:
3617
      if key in params:
3618
        changes.append(("nic.%s/%d" % (key, idx), params[key]))
3619
        setattr(nic, key, params[key])
3620

    
3621
    new_net = params.get(constants.INIC_NETWORK, nic.network)
3622
    new_net_uuid = self.cfg.LookupNetwork(new_net)
3623
    if new_net_uuid != nic.network:
3624
      changes.append(("nic.network/%d" % idx, new_net))
3625
      nic.network = new_net_uuid
3626

    
3627
    if private.filled:
3628
      nic.nicparams = private.filled
3629

    
3630
      for (key, val) in nic.nicparams.items():
3631
        changes.append(("nic.%s/%d" % (key, idx), val))
3632

    
3633
    if self.op.hotplug:
3634
      msg = self._HotplugDevice(constants.HOTPLUG_ACTION_MODIFY,
3635
                                constants.HOTPLUG_TARGET_NIC,
3636
                                nic, None, idx)
3637
      changes.append(("nic/%d" % idx, msg))
3638

    
3639
    return changes
3640

    
3641
  def _RemoveNic(self, idx, nic, _):
3642
    if self.op.hotplug:
3643
      return self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
3644
                                 constants.HOTPLUG_TARGET_NIC,
3645
                                 nic, None, idx)
3646

    
3647
  def Exec(self, feedback_fn):
    """Modifies an instance.

    All parameters take effect only at the next restart of the instance.

    """
    # Process here the warnings from CheckPrereq, as we don't have a
    # feedback_fn there.
    # TODO: Replace with self.LogWarning
    for warn in self.warn:
      feedback_fn("WARNING: %s" % warn)

    assert ((self.op.disk_template is None) ^
            bool(self.owned_locks(locking.LEVEL_NODE_RES))), \
      "Not owning any node resource locks"

    result = []

    # New primary node
    if self.op.pnode_uuid:
      self.instance.primary_node = self.op.pnode_uuid

    # runtime memory
    if self.op.runtime_mem:
      rpcres = self.rpc.call_instance_balloon_memory(self.instance.primary_node,
                                                     self.instance,
                                                     self.op.runtime_mem)
      rpcres.Raise("Cannot modify instance runtime memory")
      result.append(("runtime_memory", self.op.runtime_mem))

    # Apply disk changes
    _ApplyContainerMods("disk", self.instance.disks, result, self.diskmod,
                        self._CreateNewDisk, self._ModifyDisk,
                        self._RemoveDisk, post_add_fn=self._PostAddDisk)
    _UpdateIvNames(0, self.instance.disks)

    if self.op.disk_template:
      if __debug__:
        check_nodes = set(self.instance.all_nodes)
        if self.op.remote_node_uuid:
          check_nodes.add(self.op.remote_node_uuid)
        for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
          owned = self.owned_locks(level)
          assert not (check_nodes - owned), \
            ("Not owning the correct locks, owning %r, expected at least %r" %
             (owned, check_nodes))

      r_shut = ShutdownInstanceDisks(self, self.instance)
      if not r_shut:
        raise errors.OpExecError("Cannot shutdown instance disks, unable to"
                                 " proceed with disk template conversion")
      mode = (self.instance.disk_template, self.op.disk_template)
      try:
        self._DISK_CONVERSIONS[mode](self, feedback_fn)
      except:
        self.cfg.ReleaseDRBDMinors(self.instance.uuid)
        raise
      result.append(("disk_template", self.op.disk_template))

      assert self.instance.disk_template == self.op.disk_template, \
        ("Expected disk template '%s', found '%s'" %
         (self.op.disk_template, self.instance.disk_template))

    # Release node and resource locks if there are any (they might already have
    # been released during disk conversion)
    ReleaseLocks(self, locking.LEVEL_NODE)
    ReleaseLocks(self, locking.LEVEL_NODE_RES)

    # Apply NIC changes
    if self._new_nics is not None:
      self.instance.nics = self._new_nics
      result.extend(self._nic_chgdesc)

    # hvparams changes
    if self.op.hvparams:
      self.instance.hvparams = self.hv_inst
      for key, val in self.op.hvparams.iteritems():
        result.append(("hv/%s" % key, val))

    # beparams changes
    if self.op.beparams:
      self.instance.beparams = self.be_inst
      for key, val in self.op.beparams.iteritems():
        result.append(("be/%s" % key, val))

    # OS change
    if self.op.os_name:
      self.instance.os = self.op.os_name

    # osparams changes
    if self.op.osparams:
      self.instance.osparams = self.os_inst
      for key, val in self.op.osparams.iteritems():
        result.append(("os/%s" % key, val))

    if self.op.osparams_private:
      self.instance.osparams_private = self.os_inst_private
      for key, val in self.op.osparams_private.iteritems():
        # Show the Private(...) blurb.
        result.append(("os_private/%s" % key, repr(val)))

    if self.op.offline is None:
      # Ignore
      pass
    elif self.op.offline:
      # Mark instance as offline
      self.cfg.MarkInstanceOffline(self.instance.uuid)
      result.append(("admin_state", constants.ADMINST_OFFLINE))
    else:
      # Mark instance as online, but stopped
      self.cfg.MarkInstanceDown(self.instance.uuid)
      result.append(("admin_state", constants.ADMINST_DOWN))

    self.cfg.Update(self.instance, feedback_fn, self.proc.GetECId())

    assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
                self.owned_locks(locking.LEVEL_NODE)), \
      "All node locks should have been released by now"

    return result
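
  # Map of supported in-place disk template conversions; each key is an
  # (old template, new template) pair, each value the helper doing the work.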
  _DISK_CONVERSIONS = {
    (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
    (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
    }


class LUInstanceChangeGroup(LogicalUnit):
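  """Change the node group of an instance.

  Target groups can be requested explicitly through C{target_groups};
  otherwise every group not currently used by the instance is considered.
  The actual moves are computed by the instance allocator and returned as
  jobs to be submitted.

  """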
  HPATH = "instance-change-group"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self.share_locks = ShareAll()

    self.needed_locks = {
      locking.LEVEL_NODEGROUP: [],
      locking.LEVEL_NODE: [],
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
      }

    self._ExpandAndLockInstance()

    if self.op.target_groups:
      self.req_target_uuids = map(self.cfg.LookupNodeGroup,
                                  self.op.target_groups)
    else:
      self.req_target_uuids = None

    self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)

  def DeclareLocks(self, level):
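    """Declares the node group and node locks.

    Node groups are locked optimistically: either the requested target
    groups plus the groups currently used by the instance, or all groups
    if no targets were given. Node locks cover the instance's own nodes
    and the member nodes of all potential target groups (or all nodes
    when every group is a potential target).

    """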
    if level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]

      if self.req_target_uuids:
        lock_groups = set(self.req_target_uuids)

        # Lock all groups used by instance optimistically; this requires going
        # via the node before it's locked, requiring verification later on
        instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
        lock_groups.update(instance_groups)
      else:
        # No target groups, need to lock all of them
        lock_groups = locking.ALL_SET

      self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups

    elif level == locking.LEVEL_NODE:
      if self.req_target_uuids:
        # Lock all nodes used by instances
        self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
        self._LockInstancesNodes()

        # Lock all nodes in all potential target groups
        lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
                       self.cfg.GetInstanceNodeGroups(self.op.instance_uuid))
        member_nodes = [node_uuid
                        for group in lock_groups
                        for node_uuid in self.cfg.GetNodeGroup(group).members]
        self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
      else:
        # Lock all nodes as all groups are potential targets
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
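    """Checks prerequisites.

    Verifies that the locks acquired in L{DeclareLocks} are still
    consistent with the configuration and computes the final set of
    target groups, rejecting groups already used by the instance.

    """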
    owned_instance_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))

    assert (self.req_target_uuids is None or
            owned_groups.issuperset(self.req_target_uuids))
    assert owned_instance_names == set([self.op.instance_name])

    # Get instance information
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)

    # Check if node groups for locked instance are still correct
    assert owned_nodes.issuperset(self.instance.all_nodes), \
      ("Instance %s's nodes changed while we kept the lock" %
       self.op.instance_name)

    inst_groups = CheckInstanceNodeGroups(self.cfg, self.op.instance_uuid,
                                          owned_groups)

    if self.req_target_uuids:
      # User requested specific target groups
      self.target_uuids = frozenset(self.req_target_uuids)
    else:
      # All groups except those used by the instance are potential targets
      self.target_uuids = owned_groups - inst_groups

    conflicting_groups = self.target_uuids & inst_groups
    if conflicting_groups:
      raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
                                 " used by the instance '%s'" %
                                 (utils.CommaJoin(conflicting_groups),
                                  self.op.instance_name),
                                 errors.ECODE_INVAL)

    if not self.target_uuids:
      raise errors.OpPrereqError("There are no possible target groups",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    assert self.target_uuids

    env = {
      "TARGET_GROUPS": " ".join(self.target_uuids),
      }

    env.update(BuildInstanceHookEnvByObject(self, self.instance))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()
    return ([mn], [mn])

  def Exec(self, feedback_fn):
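    """Computes the jobs needed to change the instance's group.

    Builds an L{iallocator.IAReqGroupChange} request, runs the instance
    allocator and returns the resulting jobs for execution.

    """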
    instances = list(self.owned_locks(locking.LEVEL_INSTANCE))

    assert instances == [self.op.instance_name], "Instance not locked"

    req = iallocator.IAReqGroupChange(instances=instances,
                                      target_groups=list(self.target_uuids))
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute solution for changing group of"
                                 " instance '%s' using iallocator '%s': %s" %
                                 (self.op.instance_name, self.op.iallocator,
                                  ial.info), errors.ECODE_NORES)

    jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False)

    self.LogInfo("Iallocator returned %s job(s) for changing group of"
                 " instance '%s'", len(jobs), self.op.instance_name)

    return ResultWithJobs(jobs)