lib/cmdlib/instance.py @ 4e7f986e


#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Logical units dealing with instances."""

import OpenSSL
import copy
import logging
import os

from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import ht
from ganeti import hypervisor
from ganeti import locking
from ganeti.masterd import iallocator
from ganeti import masterd
from ganeti import netutils
from ganeti import objects
from ganeti import pathutils
from ganeti import serializer
import ganeti.rpc.node as rpc
from ganeti import utils

from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs

from ganeti.cmdlib.common import INSTANCE_DOWN, \
  INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, CheckNodeOnline, \
  ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
  LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
  IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
  AnnotateDiskParams, GetUpdatedParams, ExpandInstanceUuidAndName, \
  ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeUuidAndName, \
  CheckDiskTemplateEnabled, IsValidDiskAccessModeCombination
from ganeti.cmdlib.instance_storage import CreateDisks, \
  CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, WaitForSync, \
  IsExclusiveStorageEnabledNodeUuid, CreateSingleBlockDev, ComputeDisks, \
  CheckRADOSFreeSpace, ComputeDiskSizePerVG, GenerateDiskTemplate, \
  StartInstanceDisks, ShutdownInstanceDisks, AssembleInstanceDisks, \
  CheckSpindlesExclusiveStorage
from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
  GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
  NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
  ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
  GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
  CheckInstanceBridgesExist, CheckNicsBridgesExist, CheckNodeHasOS

import ganeti.masterd.instance


#: Type description for changes as returned by L{_ApplyContainerMods}'s
#: callbacks
_TApplyContModsCbChanges = \
  ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
    ht.TNonEmptyString,
    ht.TAny,
    ])))


def _CheckHostnameSane(lu, name):
  """Ensures that a given hostname resolves to a 'sane' name.

  The given name is required to be a prefix of the resolved hostname,
  to prevent accidental mismatches.

  @param lu: the logical unit on behalf of which we're checking
  @param name: the name we should resolve and check
  @return: the resolved hostname object

  """
  hostname = netutils.GetHostname(name=name)
  if hostname.name != name:
    lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name)
  if not utils.MatchNameComponent(name, [hostname.name]):
    raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
                                " same as given hostname '%s'") %
                               (hostname.name, name), errors.ECODE_INVAL)
  return hostname
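
# Example (illustrative, hostnames made up): for name="inst1", a resolver
# answer of "inst1.example.com" is accepted (and the full name is logged),
# while an answer of "otherhost.example.com" makes _CheckHostnameSane raise
# OpPrereqError, because the given name is not a prefix component of the
# resolved name.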


def _CheckOpportunisticLocking(op):
  """Generate error if opportunistic locking is not possible.

  """
  if op.opportunistic_locking and not op.iallocator:
    raise errors.OpPrereqError("Opportunistic locking is only available in"
                               " combination with an instance allocator",
                               errors.ECODE_INVAL)


def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_name_whitelist):
  """Wrapper around IAReqInstanceAlloc.

  @param op: The instance opcode
  @param disks: The computed disks
  @param nics: The computed nics
  @param beparams: The full filled beparams
  @param node_name_whitelist: List of nodes which should appear as online to the
    allocator (unless the node is already marked offline)

  @returns: A filled L{iallocator.IAReqInstanceAlloc}

  """
  spindle_use = beparams[constants.BE_SPINDLE_USE]
  return iallocator.IAReqInstanceAlloc(name=op.instance_name,
                                       disk_template=op.disk_template,
                                       tags=op.tags,
                                       os=op.os_type,
                                       vcpus=beparams[constants.BE_VCPUS],
                                       memory=beparams[constants.BE_MAXMEM],
                                       spindle_use=spindle_use,
                                       disks=disks,
                                       nics=[n.ToDict() for n in nics],
                                       hypervisor=op.hypervisor,
                                       node_whitelist=node_name_whitelist)


def _ComputeFullBeParams(op, cluster):
  """Computes the full beparams.

  @param op: The instance opcode
  @param cluster: The cluster config object

  @return: The fully filled beparams

  """
  default_beparams = cluster.beparams[constants.PP_DEFAULT]
  for param, value in op.beparams.iteritems():
    if value == constants.VALUE_AUTO:
      op.beparams[param] = default_beparams[param]
  objects.UpgradeBeParams(op.beparams)
  utils.ForceDictType(op.beparams, constants.BES_PARAMETER_TYPES)
  return cluster.SimpleFillBE(op.beparams)
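
# Example (illustrative values): with cluster default beparams such as
# {"vcpus": 1, "maxmem": 1024, ...} and op.beparams
# {"vcpus": "auto", "maxmem": 2048}, the "auto" entry is replaced by the
# cluster default before SimpleFillBE merges in the remaining defaults,
# so the returned dict contains vcpus=1 and maxmem=2048.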


def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
  """Computes the nics.

  @param op: The instance opcode
  @param cluster: Cluster configuration object
  @param default_ip: The default ip to assign
  @param cfg: An instance of the configuration object
  @param ec_id: Execution context ID

  @returns: The build up nics

  """
  nics = []
  for nic in op.nics:
    nic_mode_req = nic.get(constants.INIC_MODE, None)
    nic_mode = nic_mode_req
    if nic_mode is None or nic_mode == constants.VALUE_AUTO:
      nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]

    net = nic.get(constants.INIC_NETWORK, None)
    link = nic.get(constants.NIC_LINK, None)
    ip = nic.get(constants.INIC_IP, None)
    vlan = nic.get(constants.INIC_VLAN, None)

    if net is None or net.lower() == constants.VALUE_NONE:
      net = None
    else:
      if nic_mode_req is not None or link is not None:
        raise errors.OpPrereqError("If network is given, no mode or link"
                                   " is allowed to be passed",
                                   errors.ECODE_INVAL)

    # ip validity checks
    if ip is None or ip.lower() == constants.VALUE_NONE:
      nic_ip = None
    elif ip.lower() == constants.VALUE_AUTO:
      if not op.name_check:
        raise errors.OpPrereqError("IP address set to auto but name checks"
                                   " have been skipped",
                                   errors.ECODE_INVAL)
      nic_ip = default_ip
    else:
      # We defer pool operations until later, so that the iallocator has
      # filled in the instance's node(s)
      if ip.lower() == constants.NIC_IP_POOL:
        if net is None:
          raise errors.OpPrereqError("if ip=pool, parameter network"
                                     " must be passed too",
                                     errors.ECODE_INVAL)

      elif not netutils.IPAddress.IsValid(ip):
        raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                   errors.ECODE_INVAL)

      nic_ip = ip

    # TODO: check the ip address for uniqueness
    if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
      raise errors.OpPrereqError("Routed nic mode requires an ip address",
                                 errors.ECODE_INVAL)

    # MAC address verification
    mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
    if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
      mac = utils.NormalizeAndValidateMac(mac)

      try:
        # TODO: We need to factor this out
        cfg.ReserveMAC(mac, ec_id)
      except errors.ReservationError:
        raise errors.OpPrereqError("MAC address %s already in use"
                                   " in cluster" % mac,
                                   errors.ECODE_NOTUNIQUE)

    #  Build nic parameters
    nicparams = {}
    if nic_mode_req:
      nicparams[constants.NIC_MODE] = nic_mode
    if link:
      nicparams[constants.NIC_LINK] = link
    if vlan:
      nicparams[constants.NIC_VLAN] = vlan

    check_params = cluster.SimpleFillNIC(nicparams)
    objects.NIC.CheckParameterSyntax(check_params)
    net_uuid = cfg.LookupNetwork(net)
    name = nic.get(constants.INIC_NAME, None)
    if name is not None and name.lower() == constants.VALUE_NONE:
      name = None
    nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name,
                          network=net_uuid, nicparams=nicparams)
    nic_obj.uuid = cfg.GenerateUniqueID(ec_id)
    nics.append(nic_obj)

  return nics
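
# Example (illustrative): an entry of op.nics such as
#   {constants.INIC_IP: constants.NIC_IP_POOL,
#    constants.INIC_NETWORK: "net1",
#    constants.INIC_MAC: constants.VALUE_AUTO}
# becomes an objects.NIC whose ip keeps the "pool" marker (resolved later,
# once the primary node and therefore the address pool are known), whose MAC
# stays "auto" for later generation, and whose nicparams contain only the
# explicitly requested overrides (mode/link/vlan).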


def _CheckForConflictingIp(lu, ip, node_uuid):
  """In case of conflicting IP address raise error.

  @type ip: string
  @param ip: IP address
  @type node_uuid: string
  @param node_uuid: node UUID

  """
  (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node_uuid)
  if conf_net is not None:
    raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
                                " network %s, but the target NIC does not." %
                                (ip, conf_net)),
                               errors.ECODE_STATE)

  return (None, None)


def _ComputeIPolicyInstanceSpecViolation(
  ipolicy, instance_spec, disk_template,
  _compute_fn=ComputeIPolicySpecViolation):
  """Compute if instance specs meets the specs of ipolicy.

  @type ipolicy: dict
  @param ipolicy: The ipolicy to verify against
  @type instance_spec: dict
  @param instance_spec: The instance spec to verify
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{ComputeIPolicySpecViolation}

  """
  mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
  cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
  disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
  disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
  nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
  spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)

  return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
                     disk_sizes, spindle_use, disk_template)
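
# Example (illustrative): an instance_spec of
#   {constants.ISPEC_MEM_SIZE: 2048, constants.ISPEC_CPU_COUNT: 2,
#    constants.ISPEC_DISK_COUNT: 1, constants.ISPEC_DISK_SIZE: [10240],
#    constants.ISPEC_NIC_COUNT: 1, constants.ISPEC_SPINDLE_USE: 1}
# is unpacked into the individual arguments of ComputeIPolicySpecViolation,
# which returns a (possibly empty) list of policy violations.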


def _CheckOSVariant(os_obj, name):
  """Check whether an OS name conforms to the os variants specification.

  @type os_obj: L{objects.OS}
  @param os_obj: OS object to check
  @type name: string
  @param name: OS name passed by the user, to check for validity

  """
  variant = objects.OS.GetVariant(name)
  if not os_obj.supported_variants:
    if variant:
      raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
                                 " passed)" % (os_obj.name, variant),
                                 errors.ECODE_INVAL)
    return
  if not variant:
    raise errors.OpPrereqError("OS name must include a variant",
                               errors.ECODE_INVAL)

  if variant not in os_obj.supported_variants:
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)


def _ComputeInstanceCommunicationNIC(instance_name):
  """Compute the name of the instance NIC used by instance
  communication.

  With instance communication, a new NIC is added to the instance.
  This NIC has a special name that identifies it as being part of
  instance communication, and not just a normal NIC.  This function
  generates the name of the NIC based on a prefix and the instance
  name.

  @type instance_name: string
  @param instance_name: name of the instance the NIC belongs to

  @rtype: string
  @return: name of the NIC

  """
  return constants.INSTANCE_COMMUNICATION_NIC_PREFIX + instance_name


class LUInstanceCreate(LogicalUnit):
  """Create an instance.

  """
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def _CheckDiskTemplateValid(self):
    """Checks validity of disk template.

    """
    cluster = self.cfg.GetClusterInfo()
    if self.op.disk_template is None:
      # FIXME: It would be better to take the default disk template from the
      # ipolicy, but for the ipolicy we need the primary node, which we get from
      # the iallocator, which wants the disk template as input. To solve this
      # chicken-and-egg problem, it should be possible to specify just a node
      # group from the iallocator and take the ipolicy from that.
      self.op.disk_template = cluster.enabled_disk_templates[0]
    CheckDiskTemplateEnabled(cluster, self.op.disk_template)

  def _CheckDiskArguments(self):
    """Checks validity of disk-related arguments.

    """
    # check that disk's names are unique and valid
    utils.ValidateDeviceNames("disk", self.op.disks)

    self._CheckDiskTemplateValid()

    # check disks. parameter names and consistent adopt/no-adopt strategy
    has_adopt = has_no_adopt = False
    for disk in self.op.disks:
      if self.op.disk_template != constants.DT_EXT:
        utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
      if constants.IDISK_ADOPT in disk:
        has_adopt = True
      else:
        has_no_adopt = True
    if has_adopt and has_no_adopt:
      raise errors.OpPrereqError("Either all disks are adopted or none is",
                                 errors.ECODE_INVAL)
    if has_adopt:
      if self.op.disk_template not in constants.DTS_MAY_ADOPT:
        raise errors.OpPrereqError("Disk adoption is not supported for the"
                                   " '%s' disk template" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)
      if self.op.iallocator is not None:
        raise errors.OpPrereqError("Disk adoption not allowed with an"
                                   " iallocator script", errors.ECODE_INVAL)
      if self.op.mode == constants.INSTANCE_IMPORT:
        raise errors.OpPrereqError("Disk adoption not allowed for"
                                   " instance import", errors.ECODE_INVAL)
    else:
      if self.op.disk_template in constants.DTS_MUST_ADOPT:
        raise errors.OpPrereqError("Disk template %s requires disk adoption,"
                                   " but no 'adopt' parameter given" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)

    self.adopt_disks = has_adopt

  def _CheckVLANArguments(self):
    """ Check validity of VLANs if given

    """
    for nic in self.op.nics:
      vlan = nic.get(constants.INIC_VLAN, None)
      if vlan:
        if vlan[0] == ".":
          # vlan starting with dot means single untagged vlan,
          # might be followed by trunk (:)
          if not vlan[1:].isdigit():
            vlanlist = vlan[1:].split(':')
            for vl in vlanlist:
              if not vl.isdigit():
                raise errors.OpPrereqError("Specified VLAN parameter is "
                                           "invalid : %s" % vlan,
                                           errors.ECODE_INVAL)
        elif vlan[0] == ":":
          # Trunk - tagged only
          vlanlist = vlan[1:].split(':')
          for vl in vlanlist:
            if not vl.isdigit():
              raise errors.OpPrereqError("Specified VLAN parameter is invalid"
                                         " : %s" % vlan, errors.ECODE_INVAL)
        elif vlan.isdigit():
          # This is the simplest case. No dots, only single digit
          # -> Create untagged access port, dot needs to be added
          nic[constants.INIC_VLAN] = "." + vlan
        else:
          raise errors.OpPrereqError("Specified VLAN parameter is invalid"
                                     " : %s" % vlan, errors.ECODE_INVAL)

  def CheckArguments(self):
    """Check arguments.

    """
    # do not require name_check to ease forward/backward compatibility
    # for tools
    if self.op.no_install and self.op.start:
      self.LogInfo("No-installation mode selected, disabling startup")
      self.op.start = False
    # validate/normalize the instance name
    self.op.instance_name = \
      netutils.Hostname.GetNormalizedName(self.op.instance_name)

    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("Cannot do IP address check without a name"
                                 " check", errors.ECODE_INVAL)

    # add NIC for instance communication
    if self.op.instance_communication:
      nic_name = _ComputeInstanceCommunicationNIC(self.op.instance_name)

      self.op.nics.append({constants.INIC_NAME: nic_name,
                           constants.INIC_MAC: constants.VALUE_GENERATE,
                           constants.INIC_IP: constants.NIC_IP_POOL,
                           constants.INIC_NETWORK:
                             self.cfg.GetInstanceCommunicationNetwork()})

    # check nics' parameter names
    for nic in self.op.nics:
      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
    # check that NIC's parameters names are unique and valid
    utils.ValidateDeviceNames("NIC", self.op.nics)

    self._CheckVLANArguments()

    self._CheckDiskArguments()
    assert self.op.disk_template is not None

    # instance name verification
    if self.op.name_check:
      self.hostname = _CheckHostnameSane(self, self.op.instance_name)
      self.op.instance_name = self.hostname.name
      # used in CheckPrereq for ip ping check
      self.check_ip = self.hostname.ip
    else:
      self.check_ip = None

    # file storage checks
    if (self.op.file_driver and
        not self.op.file_driver in constants.FILE_DRIVER):
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                 self.op.file_driver, errors.ECODE_INVAL)

    # set default file_driver if unset and required
    if (not self.op.file_driver and
        self.op.disk_template in constants.DTS_FILEBASED):
      self.op.file_driver = constants.FD_LOOP

    ### Node/iallocator related checks
    CheckIAllocatorOrNode(self, "iallocator", "pnode")

    if self.op.pnode is not None:
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        if self.op.snode is None:
          raise errors.OpPrereqError("The networked disk templates need"
                                     " a mirror node", errors.ECODE_INVAL)
      elif self.op.snode:
        self.LogWarning("Secondary node will be ignored on non-mirrored disk"
                        " template")
        self.op.snode = None

    _CheckOpportunisticLocking(self.op)

    if self.op.mode == constants.INSTANCE_IMPORT:
      # On import force_variant must be True, because if we forced it at
      # initial install, our only chance when importing it back is that it
      # works again!
      self.op.force_variant = True

      if self.op.no_install:
        self.LogInfo("No-installation mode has no effect during import")

    elif self.op.mode == constants.INSTANCE_CREATE:
      if self.op.os_type is None:
        raise errors.OpPrereqError("No guest OS specified",
                                   errors.ECODE_INVAL)
      if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
        raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
                                   " installation" % self.op.os_type,
                                   errors.ECODE_STATE)
    elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
      self._cds = GetClusterDomainSecret()

      # Check handshake to ensure both clusters have the same domain secret
      src_handshake = self.op.source_handshake
      if not src_handshake:
        raise errors.OpPrereqError("Missing source handshake",
                                   errors.ECODE_INVAL)

      errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
                                                           src_handshake)
      if errmsg:
        raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
                                   errors.ECODE_INVAL)

      # Load and check source CA
      self.source_x509_ca_pem = self.op.source_x509_ca
      if not self.source_x509_ca_pem:
        raise errors.OpPrereqError("Missing source X509 CA",
                                   errors.ECODE_INVAL)

      try:
        (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
                                                    self._cds)
      except OpenSSL.crypto.Error, err:
        raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
                                   (err, ), errors.ECODE_INVAL)

      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
      if errcode is not None:
        raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
                                   errors.ECODE_INVAL)

      self.source_x509_ca = cert

      src_instance_name = self.op.source_instance_name
      if not src_instance_name:
        raise errors.OpPrereqError("Missing source instance name",
                                   errors.ECODE_INVAL)

      self.source_instance_name = \
        netutils.GetHostname(name=src_instance_name).name

    else:
      raise errors.OpPrereqError("Invalid instance creation mode %r" %
                                 self.op.mode, errors.ECODE_INVAL)

  def ExpandNames(self):
    """ExpandNames for CreateInstance.

    Figure out the right locks for instance creation.

    """
    self.needed_locks = {}

    # this is just a preventive check, but someone might still add this
    # instance in the meantime, and creation will fail at lock-add time
    if self.op.instance_name in\
      [inst.name for inst in self.cfg.GetAllInstancesInfo().values()]:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 self.op.instance_name, errors.ECODE_EXISTS)

    self.add_locks[locking.LEVEL_INSTANCE] = self.op.instance_name

    if self.op.iallocator:
      # TODO: Find a solution to not lock all nodes in the cluster, e.g. by
      # specifying a group on instance creation and then selecting nodes from
      # that group
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
      self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET

      if self.op.opportunistic_locking:
        self.opportunistic_locks[locking.LEVEL_NODE] = True
    else:
      (self.op.pnode_uuid, self.op.pnode) = \
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
      nodelist = [self.op.pnode_uuid]
      if self.op.snode is not None:
        (self.op.snode_uuid, self.op.snode) = \
          ExpandNodeUuidAndName(self.cfg, self.op.snode_uuid, self.op.snode)
        nodelist.append(self.op.snode_uuid)
      self.needed_locks[locking.LEVEL_NODE] = nodelist

    # in case of import lock the source node too
    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = self.op.src_node
      src_path = self.op.src_path

      if src_path is None:
        self.op.src_path = src_path = self.op.instance_name

      if src_node is None:
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
        self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
        self.op.src_node = None
        if os.path.isabs(src_path):
          raise errors.OpPrereqError("Importing an instance from a path"
                                     " requires a source node option",
                                     errors.ECODE_INVAL)
      else:
        (self.op.src_node_uuid, self.op.src_node) = (_, src_node) = \
          ExpandNodeUuidAndName(self.cfg, self.op.src_node_uuid, src_node)
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
          self.needed_locks[locking.LEVEL_NODE].append(self.op.src_node_uuid)
        if not os.path.isabs(src_path):
          self.op.src_path = \
            utils.PathJoin(pathutils.EXPORT_DIR, src_path)

    self.needed_locks[locking.LEVEL_NODE_RES] = \
      CopyLockList(self.needed_locks[locking.LEVEL_NODE])

    # Optimistically acquire shared group locks (we're reading the
    # configuration).  We can't just call GetInstanceNodeGroups, because the
    # instance doesn't exist yet. Therefore we lock all node groups of all
    # nodes we have.
    if self.needed_locks[locking.LEVEL_NODE] == locking.ALL_SET:
      # In the case we lock all nodes for opportunistic allocation, we have no
      # choice but to lock all groups, because they're allocated before nodes.
      # This is sad, but true. At least we release all those we don't need in
      # CheckPrereq later.
      self.needed_locks[locking.LEVEL_NODEGROUP] = locking.ALL_SET
    else:
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
        list(self.cfg.GetNodeGroupsFromNodes(
          self.needed_locks[locking.LEVEL_NODE]))
    self.share_locks[locking.LEVEL_NODEGROUP] = 1

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE_RES and \
      self.opportunistic_locks[locking.LEVEL_NODE]:
      # Even when using opportunistic locking, we require the same set of
      # NODE_RES locks as we got NODE locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        self.owned_locks(locking.LEVEL_NODE)

  def _RunAllocator(self):
    """Run the allocator based on input opcode.

    """
    if self.op.opportunistic_locking:
      # Only consider nodes for which a lock is held
      node_name_whitelist = self.cfg.GetNodeNames(
        self.owned_locks(locking.LEVEL_NODE))
    else:
      node_name_whitelist = None

    req = _CreateInstanceAllocRequest(self.op, self.disks,
                                      self.nics, self.be_full,
                                      node_name_whitelist)
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      # When opportunistic locks are used only a temporary failure is generated
      if self.op.opportunistic_locking:
        ecode = errors.ECODE_TEMP_NORES
      else:
        ecode = errors.ECODE_NORES

      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" %
                                 (self.op.iallocator, ial.info),
                                 ecode)

    (self.op.pnode_uuid, self.op.pnode) = \
      ExpandNodeUuidAndName(self.cfg, None, ial.result[0])
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                 self.op.instance_name, self.op.iallocator,
                 utils.CommaJoin(ial.result))

    assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator"

    if req.RequiredNodes() == 2:
      (self.op.snode_uuid, self.op.snode) = \
        ExpandNodeUuidAndName(self.cfg, None, ial.result[1])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "ADD_MODE": self.op.mode,
      }
    if self.op.mode == constants.INSTANCE_IMPORT:
      env["SRC_NODE"] = self.op.src_node
      env["SRC_PATH"] = self.op.src_path
      env["SRC_IMAGES"] = self.src_images

    env.update(BuildInstanceHookEnv(
      name=self.op.instance_name,
      primary_node_name=self.op.pnode,
      secondary_node_names=self.cfg.GetNodeNames(self.secondaries),
      status=self.op.start,
      os_type=self.op.os_type,
      minmem=self.be_full[constants.BE_MINMEM],
      maxmem=self.be_full[constants.BE_MAXMEM],
      vcpus=self.be_full[constants.BE_VCPUS],
      nics=NICListToTuple(self, self.nics),
      disk_template=self.op.disk_template,
      disks=[(d[constants.IDISK_NAME], d.get("uuid", ""),
              d[constants.IDISK_SIZE], d[constants.IDISK_MODE])
             for d in self.disks],
      bep=self.be_full,
      hvp=self.hv_full,
      hypervisor_name=self.op.hypervisor,
      tags=self.op.tags,
      ))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode(), self.op.pnode_uuid] + self.secondaries
    return nl, nl

  def _ReadExportInfo(self):
    """Reads the export information from disk.

    It will override the opcode source node and path with the actual
    information, if these two were not specified before.

    @return: the export information

    """
    assert self.op.mode == constants.INSTANCE_IMPORT

    if self.op.src_node_uuid is None:
      locked_nodes = self.owned_locks(locking.LEVEL_NODE)
      exp_list = self.rpc.call_export_list(locked_nodes)
      found = False
      for node_uuid in exp_list:
        if exp_list[node_uuid].fail_msg:
          continue
        if self.op.src_path in exp_list[node_uuid].payload:
          found = True
          self.op.src_node = self.cfg.GetNodeInfo(node_uuid).name
          self.op.src_node_uuid = node_uuid
          self.op.src_path = utils.PathJoin(pathutils.EXPORT_DIR,
                                            self.op.src_path)
          break
      if not found:
        raise errors.OpPrereqError("No export found for relative path %s" %
                                   self.op.src_path, errors.ECODE_INVAL)

    CheckNodeOnline(self, self.op.src_node_uuid)
    result = self.rpc.call_export_info(self.op.src_node_uuid, self.op.src_path)
    result.Raise("No export or invalid export found in dir %s" %
                 self.op.src_path)

    export_info = objects.SerializableConfigParser.Loads(str(result.payload))
    if not export_info.has_section(constants.INISECT_EXP):
      raise errors.ProgrammerError("Corrupted export config",
                                   errors.ECODE_ENVIRON)

    ei_version = export_info.get(constants.INISECT_EXP, "version")
    if int(ei_version) != constants.EXPORT_VERSION:
      raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                 (ei_version, constants.EXPORT_VERSION),
                                 errors.ECODE_ENVIRON)
    return export_info

  def _ReadExportParams(self, einfo):
    """Use export parameters as defaults.

    In case the opcode doesn't specify (as in override) some instance
    parameters, then try to use them from the export information, if
    that declares them.

    """
    self.op.os_type = einfo.get(constants.INISECT_EXP, "os")

    if not self.op.disks:
      disks = []
      # TODO: import the disk iv_name too
      for idx in range(constants.MAX_DISKS):
        if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
          disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
          disks.append({constants.IDISK_SIZE: disk_sz})
      self.op.disks = disks
      if not disks and self.op.disk_template != constants.DT_DISKLESS:
        raise errors.OpPrereqError("No disk info specified and the export"
                                   " is missing the disk information",
                                   errors.ECODE_INVAL)

    if not self.op.nics:
      nics = []
      for idx in range(constants.MAX_NICS):
        if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
          ndict = {}
          for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
            nic_param_name = "nic%d_%s" % (idx, name)
            if einfo.has_option(constants.INISECT_INS, nic_param_name):
              v = einfo.get(constants.INISECT_INS, nic_param_name)
              ndict[name] = v
          nics.append(ndict)
        else:
          break
      self.op.nics = nics

    if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
      self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()

    if (self.op.hypervisor is None and
        einfo.has_option(constants.INISECT_INS, "hypervisor")):
      self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")

    if einfo.has_section(constants.INISECT_HYP):
      # use the export parameters but do not override the ones
      # specified by the user
      for name, value in einfo.items(constants.INISECT_HYP):
        if name not in self.op.hvparams:
          self.op.hvparams[name] = value

    if einfo.has_section(constants.INISECT_BEP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_BEP):
        if name not in self.op.beparams:
          self.op.beparams[name] = value
        # Compatibility for the old "memory" be param
        if name == constants.BE_MEMORY:
          if constants.BE_MAXMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MAXMEM] = value
          if constants.BE_MINMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MINMEM] = value
    else:
      # try to read the parameters old style, from the main section
      for name in constants.BES_PARAMETERS:
        if (name not in self.op.beparams and
            einfo.has_option(constants.INISECT_INS, name)):
          self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)

    if einfo.has_section(constants.INISECT_OSP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_OSP):
        if name not in self.op.osparams:
          self.op.osparams[name] = value

    if einfo.has_section(constants.INISECT_OSP_PRIVATE):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_OSP_PRIVATE):
        if name not in self.op.osparams_private:
          self.op.osparams_private[name] = serializer.Private(value, descr=name)
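
  # Illustrative sketch of the export data consumed above: the
  # constants.INISECT_INS section may carry options such as "disk0_size",
  # "nic0_mac", "nic0_ip", "tags", "hypervisor" and "name", while the
  # INISECT_HYP/INISECT_BEP/INISECT_OSP(_PRIVATE) sections hold hypervisor,
  # backend and OS parameters that are used only when the opcode does not
  # specify them itself.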

  def _RevertToDefaults(self, cluster):
    """Revert the instance parameters to the default values.

    """
    # hvparams
    hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
    for name in self.op.hvparams.keys():
      if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
        del self.op.hvparams[name]
    # beparams
    be_defs = cluster.SimpleFillBE({})
    for name in self.op.beparams.keys():
      if name in be_defs and be_defs[name] == self.op.beparams[name]:
        del self.op.beparams[name]
    # nic params
    nic_defs = cluster.SimpleFillNIC({})
    for nic in self.op.nics:
      for name in constants.NICS_PARAMETERS:
        if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
          del nic[name]
    # osparams
    os_defs = cluster.SimpleFillOS(self.op.os_type, {})
    for name in self.op.osparams.keys():
      if name in os_defs and os_defs[name] == self.op.osparams[name]:
        del self.op.osparams[name]

    os_defs_ = cluster.SimpleFillOS(self.op.os_type, {},
                                    os_params_private={})
    for name in self.op.osparams_private.keys():
      if name in os_defs_ and os_defs_[name] == self.op.osparams_private[name]:
        del self.op.osparams_private[name]

  def _CalculateFileStorageDir(self):
    """Calculate final instance file storage dir.

    """
    # file storage dir calculation/check
    self.instance_file_storage_dir = None
    if self.op.disk_template in constants.DTS_FILEBASED:
      # build the full file storage dir path
      joinargs = []

      cfg_storage = None
      if self.op.disk_template == constants.DT_FILE:
        cfg_storage = self.cfg.GetFileStorageDir()
      elif self.op.disk_template == constants.DT_SHARED_FILE:
        cfg_storage = self.cfg.GetSharedFileStorageDir()
      elif self.op.disk_template == constants.DT_GLUSTER:
        cfg_storage = self.cfg.GetGlusterStorageDir()

      if not cfg_storage:
        raise errors.OpPrereqError(
          "Cluster file storage dir for {tpl} storage type not defined".format(
            tpl=repr(self.op.disk_template)
          ),
          errors.ECODE_STATE
      )

      joinargs.append(cfg_storage)

      if self.op.file_storage_dir is not None:
        joinargs.append(self.op.file_storage_dir)

      if self.op.disk_template != constants.DT_GLUSTER:
        joinargs.append(self.op.instance_name)

      if len(joinargs) > 1:
        # pylint: disable=W0142
        self.instance_file_storage_dir = utils.PathJoin(*joinargs)
      else:
        self.instance_file_storage_dir = joinargs[0]
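
  # Example (illustrative paths): for DT_FILE with a cluster file storage
  # dir of "/srv/ganeti/file-storage", op.file_storage_dir "mysubdir" and
  # instance "inst1", the result is
  # "/srv/ganeti/file-storage/mysubdir/inst1"; for DT_GLUSTER the instance
  # name is not appended.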

  def CheckPrereq(self): # pylint: disable=R0914
    """Check prerequisites.

    """
    # Check that the optimistically acquired groups are correct wrt the
    # acquired nodes
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
    cur_groups = list(self.cfg.GetNodeGroupsFromNodes(owned_nodes))
    if not owned_groups.issuperset(cur_groups):
      raise errors.OpPrereqError("New instance %s's node groups changed since"
                                 " locks were acquired, current groups are"
                                 " are '%s', owning groups '%s'; retry the"
961
                                 " operation" %
962
                                 (self.op.instance_name,
963
                                  utils.CommaJoin(cur_groups),
964
                                  utils.CommaJoin(owned_groups)),
965
                                 errors.ECODE_STATE)
966

    
967
    self._CalculateFileStorageDir()
968

    
969
    if self.op.mode == constants.INSTANCE_IMPORT:
970
      export_info = self._ReadExportInfo()
971
      self._ReadExportParams(export_info)
972
      self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
973
    else:
974
      self._old_instance_name = None
975

    
976
    if (not self.cfg.GetVGName() and
977
        self.op.disk_template not in constants.DTS_NOT_LVM):
978
      raise errors.OpPrereqError("Cluster does not support lvm-based"
979
                                 " instances", errors.ECODE_STATE)
980

    
981
    if (self.op.hypervisor is None or
982
        self.op.hypervisor == constants.VALUE_AUTO):
983
      self.op.hypervisor = self.cfg.GetHypervisorType()
984

    
985
    cluster = self.cfg.GetClusterInfo()
986
    enabled_hvs = cluster.enabled_hypervisors
987
    if self.op.hypervisor not in enabled_hvs:
988
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
989
                                 " cluster (%s)" %
990
                                 (self.op.hypervisor, ",".join(enabled_hvs)),
991
                                 errors.ECODE_STATE)
992

    
993
    # Check tag validity
994
    for tag in self.op.tags:
995
      objects.TaggableObject.ValidateTag(tag)
996

    
997
    # check hypervisor parameter syntax (locally)
998
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
999
    filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
1000
                                      self.op.hvparams)
1001
    hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
1002
    hv_type.CheckParameterSyntax(filled_hvp)
1003
    self.hv_full = filled_hvp
1004
    # check that we don't specify global parameters on an instance
1005
    CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
1006
                         "instance", "cluster")
1007

    
1008
    # fill and remember the beparams dict
1009
    self.be_full = _ComputeFullBeParams(self.op, cluster)
1010

    
1011
    # build os parameters
1012
    if self.op.osparams_private is None:
1013
      self.op.osparams_private = serializer.PrivateDict()
1014
    if self.op.osparams_secret is None:
1015
      self.op.osparams_secret = serializer.PrivateDict()
1016

    
1017
    self.os_full = cluster.SimpleFillOS(
1018
      self.op.os_type,
1019
      self.op.osparams,
1020
      os_params_private=self.op.osparams_private,
1021
      os_params_secret=self.op.osparams_secret
1022
    )
1023

    
1024
    # now that hvp/bep are in final format, let's reset to defaults,
1025
    # if told to do so
1026
    if self.op.identify_defaults:
1027
      self._RevertToDefaults(cluster)
1028

    
1029
    # NIC buildup
1030
    self.nics = _ComputeNics(self.op, cluster, self.check_ip, self.cfg,
1031
                             self.proc.GetECId())
1032

    
1033
    # disk checks/pre-build
1034
    default_vg = self.cfg.GetVGName()
1035
    self.disks = ComputeDisks(self.op, default_vg)
1036

    
1037
    if self.op.mode == constants.INSTANCE_IMPORT:
1038
      disk_images = []
1039
      for idx in range(len(self.disks)):
1040
        option = "disk%d_dump" % idx
1041
        if export_info.has_option(constants.INISECT_INS, option):
1042
          # FIXME: are the old os-es, disk sizes, etc. useful?
1043
          export_name = export_info.get(constants.INISECT_INS, option)
1044
          image = utils.PathJoin(self.op.src_path, export_name)
1045
          disk_images.append(image)
1046
        else:
1047
          disk_images.append(False)
1048

    
1049
      self.src_images = disk_images
1050

    
1051
      if self.op.instance_name == self._old_instance_name:
1052
        for idx, nic in enumerate(self.nics):
1053
          if nic.mac == constants.VALUE_AUTO:
1054
            nic_mac_ini = "nic%d_mac" % idx
1055
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)
1056

    
1057
    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT
1058

    
1059
    # ip ping checks (we use the same ip that was resolved in ExpandNames)
1060
    if self.op.ip_check:
1061
      if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
1062
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
1063
                                   (self.check_ip, self.op.instance_name),
1064
                                   errors.ECODE_NOTUNIQUE)
1065

    
1066
    #### mac address generation
1067
    # By generating here the mac address both the allocator and the hooks get
1068
    # the real final mac address rather than the 'auto' or 'generate' value.
1069
    # There is a race condition between the generation and the instance object
1070
    # creation, which means that we know the mac is valid now, but we're not
1071
    # sure it will be when we actually add the instance. If things go bad
1072
    # adding the instance will abort because of a duplicate mac, and the
1073
    # creation job will fail.
1074
    for nic in self.nics:
1075
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
1076
        nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId())
1077

    
1078
    #### allocator run
1079

    
1080
    if self.op.iallocator is not None:
1081
      self._RunAllocator()
1082

    
1083
    # Release all unneeded node locks
1084
    keep_locks = filter(None, [self.op.pnode_uuid, self.op.snode_uuid,
1085
                               self.op.src_node_uuid])
1086
    ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
1087
    ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
1088
    ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)
1089
    # Release all unneeded group locks
1090
    ReleaseLocks(self, locking.LEVEL_NODEGROUP,
1091
                 keep=self.cfg.GetNodeGroupsFromNodes(keep_locks))
1092

    
1093
    assert (self.owned_locks(locking.LEVEL_NODE) ==
1094
            self.owned_locks(locking.LEVEL_NODE_RES)), \
1095
      "Node locks differ from node resource locks"
1096

    
1097
    #### node related checks
1098

    
1099
    # check primary node
1100
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode_uuid)
1101
    assert self.pnode is not None, \
1102
      "Cannot retrieve locked node %s" % self.op.pnode_uuid
1103
    if pnode.offline:
1104
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
1105
                                 pnode.name, errors.ECODE_STATE)
1106
    if pnode.drained:
1107
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
1108
                                 pnode.name, errors.ECODE_STATE)
1109
    if not pnode.vm_capable:
1110
      raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
1111
                                 " '%s'" % pnode.name, errors.ECODE_STATE)
1112

    
1113
    self.secondaries = []
1114

    
1115
    # Fill in any IPs from IP pools. This must happen here, because we need to
1116
    # know the nic's primary node, as specified by the iallocator
1117
    for idx, nic in enumerate(self.nics):
1118
      net_uuid = nic.network
1119
      if net_uuid is not None:
1120
        nobj = self.cfg.GetNetwork(net_uuid)
1121
        netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.uuid)
1122
        if netparams is None:
1123
          raise errors.OpPrereqError("No netparams found for network"
1124
                                     " %s. Probably not connected to"
1125
                                     " node's %s nodegroup" %
1126
                                     (nobj.name, self.pnode.name),
1127
                                     errors.ECODE_INVAL)
1128
        self.LogInfo("NIC/%d inherits netparams %s" %
1129
                     (idx, netparams.values()))
1130
        nic.nicparams = dict(netparams)
1131
        if nic.ip is not None:
1132
          if nic.ip.lower() == constants.NIC_IP_POOL:
1133
            try:
1134
              nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId())
1135
            except errors.ReservationError:
1136
              raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
1137
                                         " from the address pool" % idx,
1138
                                         errors.ECODE_STATE)
1139
            self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name)
1140
          else:
1141
            try:
1142
              self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId(),
1143
                                 check=self.op.conflicts_check)
1144
            except errors.ReservationError:
1145
              raise errors.OpPrereqError("IP address %s already in use"
1146
                                         " or does not belong to network %s" %
1147
                                         (nic.ip, nobj.name),
1148
                                         errors.ECODE_NOTUNIQUE)
1149

    
1150
      # net is None, ip None or given
1151
      elif self.op.conflicts_check:
1152
        _CheckForConflictingIp(self, nic.ip, self.pnode.uuid)
1153

    
1154
    # mirror node verification
1155
    if self.op.disk_template in constants.DTS_INT_MIRROR:
1156
      if self.op.snode_uuid == pnode.uuid:
1157
        raise errors.OpPrereqError("The secondary node cannot be the"
1158
                                   " primary node", errors.ECODE_INVAL)
1159
      CheckNodeOnline(self, self.op.snode_uuid)
1160
      CheckNodeNotDrained(self, self.op.snode_uuid)
1161
      CheckNodeVmCapable(self, self.op.snode_uuid)
1162
      self.secondaries.append(self.op.snode_uuid)
1163

    
1164
      snode = self.cfg.GetNodeInfo(self.op.snode_uuid)
1165
      if pnode.group != snode.group:
1166
        self.LogWarning("The primary and secondary nodes are in two"
1167
                        " different node groups; the disk parameters"
1168
                        " from the first disk's node group will be"
1169
                        " used")
1170

    
1171
    nodes = [pnode]
1172
    if self.op.disk_template in constants.DTS_INT_MIRROR:
1173
      nodes.append(snode)
1174
    has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
1175
    excl_stor = compat.any(map(has_es, nodes))
1176
    if excl_stor and not self.op.disk_template in constants.DTS_EXCL_STORAGE:
1177
      raise errors.OpPrereqError("Disk template %s not supported with"
1178
                                 " exclusive storage" % self.op.disk_template,
1179
                                 errors.ECODE_STATE)
1180
    for disk in self.disks:
1181
      CheckSpindlesExclusiveStorage(disk, excl_stor, True)
1182

    
1183
    node_uuids = [pnode.uuid] + self.secondaries
1184

    
1185
    if not self.adopt_disks:
1186
      if self.op.disk_template == constants.DT_RBD:
1187
        # _CheckRADOSFreeSpace() is just a placeholder.
1188
        # Any function that checks prerequisites can be placed here.
1189
        # Check if there is enough space on the RADOS cluster.
1190
        CheckRADOSFreeSpace()
1191
      elif self.op.disk_template == constants.DT_EXT:
1192
        # FIXME: Function that checks prereqs if needed
1193
        pass
1194
      elif self.op.disk_template in constants.DTS_LVM:
1195
        # Check lv size requirements, if not adopting
1196
        req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
1197
        CheckNodesFreeDiskPerVG(self, node_uuids, req_sizes)
1198
      else:
1199
        # FIXME: add checks for other, non-adopting, non-lvm disk templates
1200
        pass
1201

    
1202
    elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
1203
      all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
1204
                                disk[constants.IDISK_ADOPT])
1205
                     for disk in self.disks])
1206
      if len(all_lvs) != len(self.disks):
1207
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
1208
                                   errors.ECODE_INVAL)
1209
      for lv_name in all_lvs:
1210
        try:
1211
          # FIXME: lv_name here is "vg/lv" need to ensure that other calls
1212
          # to ReserveLV uses the same syntax
1213
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
1214
        except errors.ReservationError:
1215
          raise errors.OpPrereqError("LV named %s used by another instance" %
1216
                                     lv_name, errors.ECODE_NOTUNIQUE)
1217

    
1218
      vg_names = self.rpc.call_vg_list([pnode.uuid])[pnode.uuid]
1219
      vg_names.Raise("Cannot get VG information from node %s" % pnode.name)
1220

    
1221
      node_lvs = self.rpc.call_lv_list([pnode.uuid],
1222
                                       vg_names.payload.keys())[pnode.uuid]
1223
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
1224
      node_lvs = node_lvs.payload
1225

    
1226
      delta = all_lvs.difference(node_lvs.keys())
1227
      if delta:
1228
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
1229
                                   utils.CommaJoin(delta),
1230
                                   errors.ECODE_INVAL)
1231
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
1232
      if online_lvs:
1233
        raise errors.OpPrereqError("Online logical volumes found, cannot"
1234
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
1235
                                   errors.ECODE_STATE)
1236
      # update the size of disk based on what is found
1237
      for dsk in self.disks:
1238
        dsk[constants.IDISK_SIZE] = \
1239
          int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
1240
                                        dsk[constants.IDISK_ADOPT])][0]))
1241

    
1242
    elif self.op.disk_template == constants.DT_BLOCK:
      # Normalize and de-duplicate device paths
      all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
                       for disk in self.disks])
      if len(all_disks) != len(self.disks):
        raise errors.OpPrereqError("Duplicate disk names given for adoption",
                                   errors.ECODE_INVAL)
      baddisks = [d for d in all_disks
                  if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
      if baddisks:
        raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
                                   " cannot be adopted" %
                                   (utils.CommaJoin(baddisks),
                                    constants.ADOPTABLE_BLOCKDEV_ROOT),
                                   errors.ECODE_INVAL)

      node_disks = self.rpc.call_bdev_sizes([pnode.uuid],
                                            list(all_disks))[pnode.uuid]
      node_disks.Raise("Cannot get block device information from node %s" %
                       pnode.name)
      node_disks = node_disks.payload
      delta = all_disks.difference(node_disks.keys())
      if delta:
        raise errors.OpPrereqError("Missing block device(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_disks[dsk[constants.IDISK_ADOPT]]))

    # Check disk access param to be compatible with specified hypervisor
    node_info = self.cfg.GetNodeInfo(self.op.pnode_uuid)
    node_group = self.cfg.GetNodeGroup(node_info.group)
    disk_params = self.cfg.GetGroupDiskParams(node_group)
    access_type = disk_params[self.op.disk_template].get(
      constants.RBD_ACCESS, constants.DISK_KERNELSPACE
    )

    if not IsValidDiskAccessModeCombination(self.op.hypervisor,
                                            self.op.disk_template,
                                            access_type):
      raise errors.OpPrereqError("Selected hypervisor (%s) cannot be"
                                 " used with %s disk access param" %
                                 (self.op.hypervisor, access_type),
                                  errors.ECODE_STATE)

    # Verify instance specs
    spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
    ispec = {
      constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
      constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
      constants.ISPEC_DISK_COUNT: len(self.disks),
      constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE]
                                  for disk in self.disks],
      constants.ISPEC_NIC_COUNT: len(self.nics),
      constants.ISPEC_SPINDLE_USE: spindle_use,
      }

    group_info = self.cfg.GetNodeGroup(pnode.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
    res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
                                               self.op.disk_template)
    if not self.op.ignore_ipolicy and res:
      msg = ("Instance allocation to group %s (%s) violates policy: %s" %
             (pnode.group, group_info.name, utils.CommaJoin(res)))
      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

    CheckHVParams(self, node_uuids, self.op.hypervisor, self.op.hvparams)

    CheckNodeHasOS(self, pnode.uuid, self.op.os_type, self.op.force_variant)
    # check OS parameters (remotely)
    CheckOSParams(self, True, node_uuids, self.op.os_type, self.os_full)

    CheckNicsBridgesExist(self, self.nics, self.pnode.uuid)

    #TODO: _CheckExtParams (remotely)
    # Check parameters for extstorage

    # memory check on primary node
    #TODO(dynmem): use MINMEM for checking
    if self.op.start:
      hvfull = objects.FillDict(cluster.hvparams.get(self.op.hypervisor, {}),
                                self.op.hvparams)
      CheckNodeFreeMemory(self, self.pnode.uuid,
                          "creating instance %s" % self.op.instance_name,
                          self.be_full[constants.BE_MAXMEM],
                          self.op.hypervisor, hvfull)

    self.dry_run_result = list(node_uuids)

  def Exec(self, feedback_fn):
    """Create and add the instance to the cluster.

    """
    assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
                self.owned_locks(locking.LEVEL_NODE)), \
      "Node locks differ from node resource locks"
    assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)

    ht_kind = self.op.hypervisor
    if ht_kind in constants.HTS_REQ_PORT:
      network_port = self.cfg.AllocatePort()
    else:
      network_port = None

    instance_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())

    # This is ugly but we got a chicken-egg problem here
    # We can only take the group disk parameters, as the instance
    # has no disks yet (we are generating them right here).
    nodegroup = self.cfg.GetNodeGroup(self.pnode.group)
    disks = GenerateDiskTemplate(self,
                                 self.op.disk_template,
                                 instance_uuid, self.pnode.uuid,
                                 self.secondaries,
                                 self.disks,
                                 self.instance_file_storage_dir,
                                 self.op.file_driver,
                                 0,
                                 feedback_fn,
                                 self.cfg.GetGroupDiskParams(nodegroup))

    iobj = objects.Instance(name=self.op.instance_name,
                            uuid=instance_uuid,
                            os=self.op.os_type,
                            primary_node=self.pnode.uuid,
                            nics=self.nics, disks=disks,
                            disk_template=self.op.disk_template,
                            disks_active=False,
                            admin_state=constants.ADMINST_DOWN,
                            network_port=network_port,
                            beparams=self.op.beparams,
                            hvparams=self.op.hvparams,
                            hypervisor=self.op.hypervisor,
                            osparams=self.op.osparams,
                            osparams_private=self.op.osparams_private,
                            )

    if self.op.tags:
      for tag in self.op.tags:
        iobj.AddTag(tag)

    if self.adopt_disks:
      if self.op.disk_template == constants.DT_PLAIN:
        # rename LVs to the newly-generated names; we need to construct
        # 'fake' LV disks with the old data, plus the new unique_id
        tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
        rename_to = []
        for t_dsk, a_dsk in zip(tmp_disks, self.disks):
          rename_to.append(t_dsk.logical_id)
          t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
        result = self.rpc.call_blockdev_rename(self.pnode.uuid,
                                               zip(tmp_disks, rename_to))
        result.Raise("Failed to rename adopted LVs")
    else:
      feedback_fn("* creating instance disks...")
      try:
        CreateDisks(self, iobj)
      except errors.OpExecError:
        self.LogWarning("Device creation failed")
        self.cfg.ReleaseDRBDMinors(self.op.instance_name)
        raise

    feedback_fn("adding instance %s to cluster config" % self.op.instance_name)
1406

    
1407
    self.cfg.AddInstance(iobj, self.proc.GetECId())
1408

    
1409
    # Declare that we don't want to remove the instance lock anymore, as we've
1410
    # added the instance to the config
1411
    del self.remove_locks[locking.LEVEL_INSTANCE]
1412

    
1413
    if self.op.mode == constants.INSTANCE_IMPORT:
1414
      # Release unused nodes
1415
      ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node_uuid])
1416
    else:
1417
      # Release all nodes
1418
      ReleaseLocks(self, locking.LEVEL_NODE)
1419

    
1420
    disk_abort = False
1421
    if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
1422
      feedback_fn("* wiping instance disks...")
1423
      try:
1424
        WipeDisks(self, iobj)
1425
      except errors.OpExecError, err:
1426
        logging.exception("Wiping disks failed")
1427
        self.LogWarning("Wiping instance disks failed (%s)", err)
1428
        disk_abort = True
1429

    
1430
    if disk_abort:
1431
      # Something is already wrong with the disks, don't do anything else
1432
      pass
1433
    elif self.op.wait_for_sync:
1434
      disk_abort = not WaitForSync(self, iobj)
1435
    elif iobj.disk_template in constants.DTS_INT_MIRROR:
1436
      # make sure the disks are not degraded (still sync-ing is ok)
1437
      feedback_fn("* checking mirrors status")
1438
      disk_abort = not WaitForSync(self, iobj, oneshot=True)
1439
    else:
1440
      disk_abort = False
1441

    
1442
    if disk_abort:
1443
      RemoveDisks(self, iobj)
1444
      self.cfg.RemoveInstance(iobj.uuid)
1445
      # Make sure the instance lock gets removed
1446
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
1447
      raise errors.OpExecError("There are some degraded disks for"
1448
                               " this instance")
1449

    
1450
    # instance disks are now active
1451
    iobj.disks_active = True
1452

    
1453
    # Release all node resource locks
1454
    ReleaseLocks(self, locking.LEVEL_NODE_RES)
1455

    
1456
    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
1457
      if self.op.mode == constants.INSTANCE_CREATE:
1458
        if not self.op.no_install:
1459
          pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
1460
                        not self.op.wait_for_sync)
1461
          if pause_sync:
1462
            feedback_fn("* pausing disk sync to install instance OS")
1463
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
1464
                                                              (iobj.disks,
1465
                                                               iobj), True)
1466
            for idx, success in enumerate(result.payload):
1467
              if not success:
1468
                logging.warn("pause-sync of instance %s for disk %d failed",
1469
                             self.op.instance_name, idx)
1470

    
1471
          feedback_fn("* running the instance OS create scripts...")
1472
          # FIXME: pass debug option from opcode to backend
1473
          os_add_result = \
1474
            self.rpc.call_instance_os_add(self.pnode.uuid,
1475
                                          (iobj, self.op.osparams_secret),
1476
                                          False,
1477
                                          self.op.debug_level)
1478
          if pause_sync:
1479
            feedback_fn("* resuming disk sync")
1480
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
1481
                                                              (iobj.disks,
1482
                                                               iobj), False)
1483
            for idx, success in enumerate(result.payload):
1484
              if not success:
1485
                logging.warn("resume-sync of instance %s for disk %d failed",
1486
                             self.op.instance_name, idx)
1487

    
1488
          os_add_result.Raise("Could not add os for instance %s"
1489
                              " on node %s" % (self.op.instance_name,
1490
                                               self.pnode.name))
1491

    
1492
      else:
        if self.op.mode == constants.INSTANCE_IMPORT:
          feedback_fn("* running the instance OS import scripts...")

          transfers = []

          for idx, image in enumerate(self.src_images):
            if not image:
              continue

            # FIXME: pass debug option from opcode to backend
            dt = masterd.instance.DiskTransfer("disk/%s" % idx,
                                               constants.IEIO_FILE, (image, ),
                                               constants.IEIO_SCRIPT,
                                               ((iobj.disks[idx], iobj), idx),
                                               None)
            transfers.append(dt)

          import_result = \
            masterd.instance.TransferInstanceData(self, feedback_fn,
                                                  self.op.src_node_uuid,
                                                  self.pnode.uuid,
                                                  self.pnode.secondary_ip,
                                                  self.op.compress,
                                                  iobj, transfers)
          if not compat.all(import_result):
            self.LogWarning("Some disks for instance %s on node %s were not"
                            " imported successfully" % (self.op.instance_name,
                                                        self.pnode.name))

          rename_from = self._old_instance_name

        elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
          feedback_fn("* preparing remote import...")
          # The source cluster will stop the instance before attempting to make
          # a connection. In some cases stopping an instance can take a long
          # time, hence the shutdown timeout is added to the connection
          # timeout.
          connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
                             self.op.source_shutdown_timeout)
          timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)

          assert iobj.primary_node == self.pnode.uuid
          disk_results = \
            masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
                                          self.source_x509_ca,
                                          self._cds, self.op.compress, timeouts)
          if not compat.all(disk_results):
            # TODO: Should the instance still be started, even if some disks
            # failed to import (valid for local imports, too)?
            self.LogWarning("Some disks for instance %s on node %s were not"
                            " imported successfully" % (self.op.instance_name,
                                                        self.pnode.name))

          rename_from = self.source_instance_name

        else:
          # also checked in the prereq part
          raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
                                       % self.op.mode)

        # Run rename script on newly imported instance
        assert iobj.name == self.op.instance_name
        feedback_fn("Running rename script for %s" % self.op.instance_name)
        result = self.rpc.call_instance_run_rename(self.pnode.uuid, iobj,
                                                   rename_from,
                                                   self.op.debug_level)
        result.Warn("Failed to run rename script for %s on node %s" %
                    (self.op.instance_name, self.pnode.name), self.LogWarning)

    assert not self.owned_locks(locking.LEVEL_NODE_RES)

    if self.op.start:
      iobj.admin_state = constants.ADMINST_UP
      self.cfg.Update(iobj, feedback_fn)
      logging.info("Starting instance %s on node %s", self.op.instance_name,
                   self.pnode.name)
      feedback_fn("* starting instance...")
      result = self.rpc.call_instance_start(self.pnode.uuid, (iobj, None, None),
                                            False, self.op.reason)
      result.Raise("Could not start instance")

    return self.cfg.GetNodeNames(list(self.cfg.GetInstanceNodes(iobj)))


class LUInstanceRename(LogicalUnit):
  """Rename an instance.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE

  def CheckArguments(self):
    """Check arguments.

    """
    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("IP address check requires a name check",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = BuildInstanceHookEnvByObject(self, self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + \
      list(self.cfg.GetInstanceNodes(self.instance))
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    (self.op.instance_uuid, self.op.instance_name) = \
      ExpandInstanceUuidAndName(self.cfg, self.op.instance_uuid,
                                self.op.instance_name)
    instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    assert instance is not None

    # It should actually not happen that an instance is running with a disabled
    # disk template, but in case it does, the renaming of file-based instances
    # will fail horribly. Thus, we test it before.
    if (instance.disk_template in constants.DTS_FILEBASED and
        self.op.new_name != instance.name):
      CheckDiskTemplateEnabled(self.cfg.GetClusterInfo(),
                               instance.disk_template)

    CheckNodeOnline(self, instance.primary_node)
    CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
                       msg="cannot rename")
    self.instance = instance

    new_name = self.op.new_name
    if self.op.name_check:
      hostname = _CheckHostnameSane(self, new_name)
      new_name = self.op.new_name = hostname.name
      if (self.op.ip_check and
          netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (hostname.ip, new_name),
                                   errors.ECODE_NOTUNIQUE)

    instance_names = [inst.name for
                      inst in self.cfg.GetAllInstancesInfo().values()]
    if new_name in instance_names and new_name != instance.name:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 new_name, errors.ECODE_EXISTS)

  def Exec(self, feedback_fn):
    """Rename the instance.

    """
    old_name = self.instance.name

    rename_file_storage = False
    if (self.instance.disk_template in (constants.DT_FILE,
                                        constants.DT_SHARED_FILE) and
        self.op.new_name != self.instance.name):
      old_file_storage_dir = os.path.dirname(
                               self.instance.disks[0].logical_id[1])
      rename_file_storage = True

    self.cfg.RenameInstance(self.instance.uuid, self.op.new_name)
    # Change the instance lock. This is definitely safe while we hold the BGL.
    # Otherwise the new lock would have to be added in acquired mode.
    assert self.REQ_BGL
    assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER)
    self.glm.remove(locking.LEVEL_INSTANCE, old_name)
    self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)

    # re-read the instance from the configuration after rename
    renamed_inst = self.cfg.GetInstanceInfo(self.instance.uuid)

    if rename_file_storage:
      new_file_storage_dir = os.path.dirname(
                               renamed_inst.disks[0].logical_id[1])
      result = self.rpc.call_file_storage_dir_rename(renamed_inst.primary_node,
                                                     old_file_storage_dir,
                                                     new_file_storage_dir)
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
                   " (but the instance has been renamed in Ganeti)" %
                   (self.cfg.GetNodeName(renamed_inst.primary_node),
                    old_file_storage_dir, new_file_storage_dir))

    StartInstanceDisks(self, renamed_inst, None)
    # update info on disks
    info = GetInstanceInfoText(renamed_inst)
    for (idx, disk) in enumerate(renamed_inst.disks):
      for node_uuid in self.cfg.GetInstanceNodes(renamed_inst):
        result = self.rpc.call_blockdev_setinfo(node_uuid,
                                                (disk, renamed_inst), info)
        result.Warn("Error setting info on node %s for disk %s" %
                    (self.cfg.GetNodeName(node_uuid), idx), self.LogWarning)
    try:
      result = self.rpc.call_instance_run_rename(renamed_inst.primary_node,
                                                 renamed_inst, old_name,
                                                 self.op.debug_level)
      result.Warn("Could not run OS rename script for instance %s on node %s"
                  " (but the instance has been renamed in Ganeti)" %
                  (renamed_inst.name,
                   self.cfg.GetNodeName(renamed_inst.primary_node)),
                  self.LogWarning)
    finally:
      ShutdownInstanceDisks(self, renamed_inst)

    return renamed_inst.name


class LUInstanceRemove(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()
    elif level == locking.LEVEL_NODE_RES:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = BuildInstanceHookEnvByObject(self, self.instance)
    env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()]
    nl_post = list(self.cfg.GetInstanceNodes(self.instance)) + nl
    return (nl, nl_post)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Remove the instance.

    """
    logging.info("Shutting down instance %s on node %s", self.instance.name,
                 self.cfg.GetNodeName(self.instance.primary_node))

    result = self.rpc.call_instance_shutdown(self.instance.primary_node,
                                             self.instance,
                                             self.op.shutdown_timeout,
                                             self.op.reason)
    if self.op.ignore_failures:
      result.Warn("Warning: can't shutdown instance", feedback_fn)
    else:
      result.Raise("Could not shutdown instance %s on node %s" %
                   (self.instance.name,
                    self.cfg.GetNodeName(self.instance.primary_node)))

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES))
    assert not (set(self.cfg.GetInstanceNodes(self.instance)) -
                self.owned_locks(locking.LEVEL_NODE)), \
      "Not owning correct locks"

    RemoveInstance(self, feedback_fn, self.instance, self.op.ignore_failures)


class LUInstanceMove(LogicalUnit):
  """Move an instance by data-copying.

  """
  HPATH = "instance-move"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    (self.op.target_node_uuid, self.op.target_node) = \
      ExpandNodeUuidAndName(self.cfg, self.op.target_node_uuid,
                            self.op.target_node)
    self.needed_locks[locking.LEVEL_NODE] = [self.op.target_node_uuid]
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes(primary_only=True)
    elif level == locking.LEVEL_NODE_RES:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and target nodes of the instance.

    """
    env = {
      "TARGET_NODE": self.op.target_node,
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
      }
    env.update(BuildInstanceHookEnvByObject(self, self.instance))
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [
      self.cfg.GetMasterNode(),
      self.instance.primary_node,
      self.op.target_node_uuid,
      ]
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    if self.instance.disk_template not in constants.DTS_COPYABLE:
      raise errors.OpPrereqError("Disk template %s not suitable for copying" %
                                 self.instance.disk_template,
                                 errors.ECODE_STATE)

    target_node = self.cfg.GetNodeInfo(self.op.target_node_uuid)
    assert target_node is not None, \
      "Cannot retrieve locked node %s" % self.op.target_node

    self.target_node_uuid = target_node.uuid
    if target_node.uuid == self.instance.primary_node:
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
                                 (self.instance.name, target_node.name),
                                 errors.ECODE_STATE)

    cluster = self.cfg.GetClusterInfo()
    bep = cluster.FillBE(self.instance)

    for idx, dsk in enumerate(self.instance.disks):
      if dsk.dev_type not in (constants.DT_PLAIN, constants.DT_FILE,
                              constants.DT_SHARED_FILE, constants.DT_GLUSTER):
        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
                                   " cannot copy" % idx, errors.ECODE_STATE)

    CheckNodeOnline(self, target_node.uuid)
    CheckNodeNotDrained(self, target_node.uuid)
    CheckNodeVmCapable(self, target_node.uuid)
    group_info = self.cfg.GetNodeGroup(target_node.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
    CheckTargetNodeIPolicy(self, ipolicy, self.instance, target_node, self.cfg,
                           ignore=self.op.ignore_ipolicy)

    if self.instance.admin_state == constants.ADMINST_UP:
      # check memory requirements on the target node
      CheckNodeFreeMemory(
          self, target_node.uuid, "failing over instance %s" %
          self.instance.name, bep[constants.BE_MAXMEM],
          self.instance.hypervisor,
          cluster.hvparams[self.instance.hypervisor])
    else:
      self.LogInfo("Not checking memory on the secondary node as"
                   " instance will not be started")

    # check bridge existence
    CheckInstanceBridgesExist(self, self.instance, node_uuid=target_node.uuid)

  def Exec(self, feedback_fn):
    """Move an instance.

    The move is done by shutting it down on its present node, copying
    the data over (slow) and starting it on the new node.

    """
    source_node = self.cfg.GetNodeInfo(self.instance.primary_node)
    target_node = self.cfg.GetNodeInfo(self.target_node_uuid)

    self.LogInfo("Shutting down instance %s on source node %s",
                 self.instance.name, source_node.name)

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES))

    result = self.rpc.call_instance_shutdown(source_node.uuid, self.instance,
                                             self.op.shutdown_timeout,
                                             self.op.reason)
    if self.op.ignore_consistency:
      result.Warn("Could not shutdown instance %s on node %s. Proceeding"
                  " anyway. Please make sure node %s is down. Error details" %
                  (self.instance.name, source_node.name, source_node.name),
                  self.LogWarning)
    else:
      result.Raise("Could not shutdown instance %s on node %s" %
                   (self.instance.name, source_node.name))

    # create the target disks
    try:
      CreateDisks(self, self.instance, target_node_uuid=target_node.uuid)
    except errors.OpExecError:
      self.LogWarning("Device creation failed")
      self.cfg.ReleaseDRBDMinors(self.instance.uuid)
      raise

    errs = []
    transfers = []
    # activate, get path, create transfer jobs
    for idx, disk in enumerate(self.instance.disks):
      # FIXME: pass debug option from opcode to backend
      dt = masterd.instance.DiskTransfer("disk/%s" % idx,
                                         constants.IEIO_RAW_DISK,
                                         (disk, self.instance),
                                         constants.IEIO_RAW_DISK,
                                         (disk, self.instance),
                                         None)
      transfers.append(dt)

    import_result = \
      masterd.instance.TransferInstanceData(self, feedback_fn,
                                            source_node.uuid,
                                            target_node.uuid,
                                            target_node.secondary_ip,
                                            self.op.compress,
                                            self.instance, transfers)
    if not compat.all(import_result):
      errs.append("Failed to transfer instance data")

    if errs:
      self.LogWarning("Some disks failed to copy, aborting")
      try:
        RemoveDisks(self, self.instance, target_node_uuid=target_node.uuid)
      finally:
        self.cfg.ReleaseDRBDMinors(self.instance.uuid)
        raise errors.OpExecError("Errors during disk copy: %s" %
                                 (",".join(errs),))

    self.instance.primary_node = target_node.uuid
    self.cfg.Update(self.instance, feedback_fn)

    self.LogInfo("Removing the disks on the original node")
    RemoveDisks(self, self.instance, target_node_uuid=source_node.uuid)

    # Only start the instance if it's marked as up
    if self.instance.admin_state == constants.ADMINST_UP:
      self.LogInfo("Starting instance %s on node %s",
                   self.instance.name, target_node.name)

      disks_ok, _ = AssembleInstanceDisks(self, self.instance,
                                          ignore_secondaries=True)
      if not disks_ok:
        ShutdownInstanceDisks(self, self.instance)
        raise errors.OpExecError("Can't activate the instance's disks")

      result = self.rpc.call_instance_start(target_node.uuid,
                                            (self.instance, None, None), False,
                                            self.op.reason)
      msg = result.fail_msg
      if msg:
        ShutdownInstanceDisks(self, self.instance)
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
                                 (self.instance.name, target_node.name, msg))


class LUInstanceMultiAlloc(NoHooksLU):
  """Allocates multiple instances at the same time.

  """
  REQ_BGL = False

  def CheckArguments(self):
    """Check arguments.

    """
    nodes = []
    for inst in self.op.instances:
      if inst.iallocator is not None:
        raise errors.OpPrereqError("iallocator is not allowed to be set on"
                                   " instance objects", errors.ECODE_INVAL)
      nodes.append(bool(inst.pnode))
      if inst.disk_template in constants.DTS_INT_MIRROR:
        nodes.append(bool(inst.snode))

    has_nodes = compat.any(nodes)
    if compat.all(nodes) ^ has_nodes:
      raise errors.OpPrereqError("There are instance objects providing"
                                 " pnode/snode while others do not",
                                 errors.ECODE_INVAL)

    if not has_nodes and self.op.iallocator is None:
      default_iallocator = self.cfg.GetDefaultIAllocator()
      if default_iallocator:
        self.op.iallocator = default_iallocator
      else:
        raise errors.OpPrereqError("No iallocator or nodes on the instances"
                                   " given and no cluster-wide default"
                                   " iallocator found; please specify either"
                                   " an iallocator or nodes on the instances"
                                   " or set a cluster-wide default iallocator",
                                   errors.ECODE_INVAL)

    _CheckOpportunisticLocking(self.op)

    dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
    if dups:
      raise errors.OpPrereqError("There are duplicate instance names: %s" %
                                 utils.CommaJoin(dups), errors.ECODE_INVAL)

  def ExpandNames(self):
    """Calculate the locks.

    """
    self.share_locks = ShareAll()
    self.needed_locks = {
      # iallocator will select nodes and even if no iallocator is used,
      # collisions with LUInstanceCreate should be avoided
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
      }

    if self.op.iallocator:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
      self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET

      if self.op.opportunistic_locking:
        self.opportunistic_locks[locking.LEVEL_NODE] = True
    else:
      nodeslist = []
      for inst in self.op.instances:
        (inst.pnode_uuid, inst.pnode) = \
          ExpandNodeUuidAndName(self.cfg, inst.pnode_uuid, inst.pnode)
        nodeslist.append(inst.pnode_uuid)
        if inst.snode is not None:
          (inst.snode_uuid, inst.snode) = \
            ExpandNodeUuidAndName(self.cfg, inst.snode_uuid, inst.snode)
          nodeslist.append(inst.snode_uuid)

      self.needed_locks[locking.LEVEL_NODE] = nodeslist
      # Lock resources of instance's primary and secondary nodes (copy to
      # prevent accidental modification)
      self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE_RES and \
      self.opportunistic_locks[locking.LEVEL_NODE]:
      # Even when using opportunistic locking, we require the same set of
      # NODE_RES locks as we got NODE locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        self.owned_locks(locking.LEVEL_NODE)

  def CheckPrereq(self):
    """Check prerequisite.

    """
    if self.op.iallocator:
      cluster = self.cfg.GetClusterInfo()
      default_vg = self.cfg.GetVGName()
      ec_id = self.proc.GetECId()

      if self.op.opportunistic_locking:
        # Only consider nodes for which a lock is held
        node_whitelist = self.cfg.GetNodeNames(
                           list(self.owned_locks(locking.LEVEL_NODE)))
      else:
        node_whitelist = None

      insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
                                           _ComputeNics(op, cluster, None,
                                                        self.cfg, ec_id),
                                           _ComputeFullBeParams(op, cluster),
                                           node_whitelist)
               for op in self.op.instances]

      req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
      ial = iallocator.IAllocator(self.cfg, self.rpc, req)

      ial.Run(self.op.iallocator)

      if not ial.success:
        raise errors.OpPrereqError("Can't compute nodes using"
                                   " iallocator '%s': %s" %
                                   (self.op.iallocator, ial.info),
                                   errors.ECODE_NORES)

      self.ia_result = ial.result

    if self.op.dry_run:
      self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
        constants.JOB_IDS_KEY: [],
        })

  def _ConstructPartialResult(self):
    """Constructs the partial result.

    """
    if self.op.iallocator:
      (allocatable, failed_insts) = self.ia_result
      allocatable_insts = map(compat.fst, allocatable)
    else:
      allocatable_insts = [op.instance_name for op in self.op.instances]
      failed_insts = []

    return {
      constants.ALLOCATABLE_KEY: allocatable_insts,
      constants.FAILED_KEY: failed_insts,
      }

  def Exec(self, feedback_fn):
    """Executes the opcode.

    """
    jobs = []
    if self.op.iallocator:
      op2inst = dict((op.instance_name, op) for op in self.op.instances)
      (allocatable, failed) = self.ia_result

      for (name, node_names) in allocatable:
        op = op2inst.pop(name)

        (op.pnode_uuid, op.pnode) = \
          ExpandNodeUuidAndName(self.cfg, None, node_names[0])
        if len(node_names) > 1:
          (op.snode_uuid, op.snode) = \
            ExpandNodeUuidAndName(self.cfg, None, node_names[1])

        jobs.append([op])

      missing = set(op2inst.keys()) - set(failed)
      assert not missing, \
        "Iallocator did return incomplete result: %s" % \
        utils.CommaJoin(missing)
    else:
      jobs.extend([op] for op in self.op.instances)

    return ResultWithJobs(jobs, **self._ConstructPartialResult())


class _InstNicModPrivate:
  """Data structure for network interface modifications.

  Used by L{LUInstanceSetParams}.

  """
  def __init__(self):
    self.params = None
    self.filled = None


def _PrepareContainerMods(mods, private_fn):
  """Prepares a list of container modifications by adding a private data field.

  @type mods: list of tuples; (operation, index, parameters)
  @param mods: List of modifications
  @type private_fn: callable or None
  @param private_fn: Callable for constructing a private data field for a
    modification
  @rtype: list

  """
  if private_fn is None:
    fn = lambda: None
  else:
    fn = private_fn

  return [(op, idx, params, fn()) for (op, idx, params) in mods]
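
# Illustrative usage sketch (not part of the original module); assuming a
# single NIC addition coming from an opcode:
#   mods = [(constants.DDM_ADD, -1,
#            {constants.INIC_MAC: constants.VALUE_AUTO})]
#   prepared = _PrepareContainerMods(mods, _InstNicModPrivate)
#   # prepared == [(constants.DDM_ADD, -1, {...}, <_InstNicModPrivate>)]
# With private_fn=None the added private field is simply None.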


def _CheckNodesPhysicalCPUs(lu, node_uuids, requested, hypervisor_specs):
  """Checks if nodes have enough physical CPUs

  This function checks if all given nodes have the needed number of
  physical CPUs. In case any node has fewer CPUs or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type node_uuids: C{list}
  @param node_uuids: the list of node UUIDs to check
  @type requested: C{int}
  @param requested: the minimum acceptable number of physical CPUs
  @type hypervisor_specs: list of pairs (string, dict of strings)
  @param hypervisor_specs: list of hypervisor specifications in
      pairs (hypervisor_name, hvparams)
  @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
      or we cannot check the node

  """
  nodeinfo = lu.rpc.call_node_info(node_uuids, None, hypervisor_specs)
  for node_uuid in node_uuids:
    info = nodeinfo[node_uuid]
    node_name = lu.cfg.GetNodeName(node_uuid)
    info.Raise("Cannot get current information from node %s" % node_name,
               prereq=True, ecode=errors.ECODE_ENVIRON)
    (_, _, (hv_info, )) = info.payload
    num_cpus = hv_info.get("cpu_total", None)
    if not isinstance(num_cpus, int):
      raise errors.OpPrereqError("Can't compute the number of physical CPUs"
                                 " on node %s, result was '%s'" %
                                 (node_name, num_cpus), errors.ECODE_ENVIRON)
    if requested > num_cpus:
      raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
                                 "required" % (node_name, num_cpus, requested),
                                 errors.ECODE_NORES)
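
# Illustrative call (hypothetical lu/cluster/node_uuids values, not part of
# the original module): require at least 4 physical CPUs under KVM:
#   hv_specs = [(constants.HT_KVM, cluster.hvparams[constants.HT_KVM])]
#   _CheckNodesPhysicalCPUs(lu, node_uuids, 4, hv_specs)
# A node reporting fewer CPUs, or no usable "cpu_total", raises OpPrereqError.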


def GetItemFromContainer(identifier, kind, container):
  """Return the item referred to by the identifier.

  @type identifier: string
  @param identifier: Item index or name or UUID
  @type kind: string
  @param kind: One-word item description
  @type container: list
  @param container: Container to get the item from

  """
  # Index
  try:
    idx = int(identifier)
    if idx == -1:
      # Append
      absidx = len(container) - 1
    elif idx < 0:
      raise IndexError("Not accepting negative indices other than -1")
    elif idx > len(container):
      raise IndexError("Got %s index %s, but there are only %s" %
                       (kind, idx, len(container)))
    else:
      absidx = idx
    return (absidx, container[idx])
  except ValueError:
    pass

  for idx, item in enumerate(container):
    if item.uuid == identifier or item.name == identifier:
      return (idx, item)

  raise errors.OpPrereqError("Cannot find %s with identifier %s" %
                             (kind, identifier), errors.ECODE_NOENT)
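
# Illustrative lookups (assuming "nics" is a list of objects.NIC):
#   GetItemFromContainer("0", "nic", nics)     # -> (0, nics[0])
#   GetItemFromContainer("-1", "nic", nics)    # -> (len(nics) - 1, nics[-1])
#   GetItemFromContainer("eth0", "nic", nics)  # -> first NIC whose name or
#                                              #    UUID equals "eth0"
# An unknown identifier raises OpPrereqError with errors.ECODE_NOENT.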


def _ApplyContainerMods(kind, container, chgdesc, mods,
                        create_fn, modify_fn, remove_fn,
                        post_add_fn=None):
  """Applies descriptions in C{mods} to C{container}.

  @type kind: string
  @param kind: One-word item description
  @type container: list
  @param container: Container to modify
  @type chgdesc: None or list
  @param chgdesc: List of applied changes
  @type mods: list
  @param mods: Modifications as returned by L{_PrepareContainerMods}
  @type create_fn: callable
  @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
    receives absolute item index, parameters and private data object as added
    by L{_PrepareContainerMods}, returns tuple containing new item and changes
    as list
  @type modify_fn: callable
  @param modify_fn: Callback for modifying an existing item
    (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
    and private data object as added by L{_PrepareContainerMods}, returns
    changes as list
  @type remove_fn: callable
  @param remove_fn: Callback on removing item; receives absolute item index,
    item and private data object as added by L{_PrepareContainerMods}
  @type post_add_fn: callable
  @param post_add_fn: Callable for post-processing a newly created item after
    it has been put into the container. It receives the index of the new item
    and the new item as parameters.

  """
  for (op, identifier, params, private) in mods:
    changes = None

    if op == constants.DDM_ADD:
      # Calculate where item will be added
      # When adding an item, identifier can only be an index
      try:
        idx = int(identifier)
      except ValueError:
        raise errors.OpPrereqError("Only positive integer or -1 is accepted as"
                                   " identifier for %s" % constants.DDM_ADD,
                                   errors.ECODE_INVAL)
      if idx == -1:
        addidx = len(container)
      else:
        if idx < 0:
          raise IndexError("Not accepting negative indices other than -1")
        elif idx > len(container):
          raise IndexError("Got %s index %s, but there are only %s" %
                           (kind, idx, len(container)))
        addidx = idx

      if create_fn is None:
        item = params
      else:
        (item, changes) = create_fn(addidx, params, private)

      if idx == -1:
        container.append(item)
      else:
        assert idx >= 0
        assert idx <= len(container)
        # list.insert does so before the specified index
        container.insert(idx, item)

      if post_add_fn is not None:
        post_add_fn(addidx, item)

    else:
      # Retrieve existing item
      (absidx, item) = GetItemFromContainer(identifier, kind, container)

      if op == constants.DDM_REMOVE:
        assert not params

        changes = [("%s/%s" % (kind, absidx), "remove")]

        if remove_fn is not None:
          msg = remove_fn(absidx, item, private)
          if msg:
            changes.append(("%s/%s" % (kind, absidx), msg))

        assert container[absidx] == item
        del container[absidx]
      elif op == constants.DDM_MODIFY:
        if modify_fn is not None:
          changes = modify_fn(absidx, item, params, private)
      else:
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)

    assert _TApplyContModsCbChanges(changes)

    if not (chgdesc is None or changes is None):
      chgdesc.extend(changes)
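
# Minimal illustrative use with no callbacks (a sketch, not part of the
# original module): append a plain item to a list.
#   container = ["a", "b"]
#   mods = _PrepareContainerMods([(constants.DDM_ADD, -1, "c")], None)
#   _ApplyContainerMods("item", container, None, mods, None, None, None)
#   # container == ["a", "b", "c"]
# In L{LUInstanceSetParams} the callbacks create, modify and remove real disk
# and NIC objects and report their changes through chgdesc.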


def _UpdateIvNames(base_index, disks):
  """Updates the C{iv_name} attribute of disks.

  @type disks: list of L{objects.Disk}

  """
  for (idx, disk) in enumerate(disks):
    disk.iv_name = "disk/%s" % (base_index + idx, )
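
# Illustrative renumbering (a sketch, not part of the original module): two
# disks appended after two existing ones get consecutive iv_names.
#   _UpdateIvNames(2, [disk_a, disk_b])
#   # disk_a.iv_name == "disk/2", disk_b.iv_name == "disk/3"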


class LUInstanceSetParams(LogicalUnit):
  """Modifies an instance's parameters.

  """
  HPATH = "instance-modify"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  @staticmethod
  def _UpgradeDiskNicMods(kind, mods, verify_fn):
    assert ht.TList(mods)
    assert not mods or len(mods[0]) in (2, 3)

    if mods and len(mods[0]) == 2:
      result = []

      addremove = 0
      for op, params in mods:
        if op in (constants.DDM_ADD, constants.DDM_REMOVE):
          result.append((op, -1, params))
          addremove += 1

          if addremove > 1:
            raise errors.OpPrereqError("Only one %s add or remove operation is"
                                       " supported at a time" % kind,
                                       errors.ECODE_INVAL)
        else:
          result.append((constants.DDM_MODIFY, op, params))

      assert verify_fn(result)
    else:
      result = mods

    return result

  @staticmethod
  def _CheckMods(kind, mods, key_types, item_fn):
    """Ensures requested disk/NIC modifications are valid.

    """
    for (op, _, params) in mods:
      assert ht.TDict(params)

      # If 'key_types' is an empty dict, we assume we have an
      # 'ext' template and thus do not ForceDictType
      if key_types:
        utils.ForceDictType(params, key_types)

      if op == constants.DDM_REMOVE:
        if params:
          raise errors.OpPrereqError("No settings should be passed when"
                                     " removing a %s" % kind,
                                     errors.ECODE_INVAL)
      elif op in (constants.DDM_ADD, constants.DDM_MODIFY):
        item_fn(op, params)
      else:
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)

  def _VerifyDiskModification(self, op, params, excl_stor):
    """Verifies a disk modification.

    """
    if op == constants.DDM_ADD:
      mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
      if mode not in constants.DISK_ACCESS_SET:
        raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
                                   errors.ECODE_INVAL)

      size = params.get(constants.IDISK_SIZE, None)
      if size is None:
        raise errors.OpPrereqError("Required disk parameter '%s' missing" %
                                   constants.IDISK_SIZE, errors.ECODE_INVAL)
      size = int(size)

      params[constants.IDISK_SIZE] = size
      name = params.get(constants.IDISK_NAME, None)
      if name is not None and name.lower() == constants.VALUE_NONE:
        params[constants.IDISK_NAME] = None

      CheckSpindlesExclusiveStorage(params, excl_stor, True)

    elif op == constants.DDM_MODIFY:
      if constants.IDISK_SIZE in params:
        raise errors.OpPrereqError("Disk size change not possible, use"
                                   " grow-disk", errors.ECODE_INVAL)

      # Disk modification supports changing only the disk name and mode.
      # Changing arbitrary parameters is allowed only for the ext disk template
      if self.instance.disk_template != constants.DT_EXT:
        utils.ForceDictType(params, constants.MODIFIABLE_IDISK_PARAMS_TYPES)

      name = params.get(constants.IDISK_NAME, None)
      if name is not None and name.lower() == constants.VALUE_NONE:
        params[constants.IDISK_NAME] = None

  @staticmethod
  def _VerifyNicModification(op, params):
    """Verifies a network interface modification.

    """
    if op in (constants.DDM_ADD, constants.DDM_MODIFY):
      ip = params.get(constants.INIC_IP, None)
      name = params.get(constants.INIC_NAME, None)
      req_net = params.get(constants.INIC_NETWORK, None)
      link = params.get(constants.NIC_LINK, None)
      mode = params.get(constants.NIC_MODE, None)
      if name is not None and name.lower() == constants.VALUE_NONE:
        params[constants.INIC_NAME] = None
      if req_net is not None:
        if req_net.lower() == constants.VALUE_NONE:
          params[constants.INIC_NETWORK] = None
          req_net = None
        elif link is not None or mode is not None:
          raise errors.OpPrereqError("If network is given,"
                                     " mode or link should not be set",
                                     errors.ECODE_INVAL)

      if op == constants.DDM_ADD:
        macaddr = params.get(constants.INIC_MAC, None)
        if macaddr is None:
          params[constants.INIC_MAC] = constants.VALUE_AUTO

      if ip is not None:
        if ip.lower() == constants.VALUE_NONE:
          params[constants.INIC_IP] = None
        else:
          if ip.lower() == constants.NIC_IP_POOL:
            if op == constants.DDM_ADD and req_net is None:
              raise errors.OpPrereqError("If ip=pool, parameter network"
                                         " cannot be none",
                                         errors.ECODE_INVAL)
          else:
            if not netutils.IPAddress.IsValid(ip):
              raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                         errors.ECODE_INVAL)

      if constants.INIC_MAC in params:
        macaddr = params[constants.INIC_MAC]
        if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
          macaddr = utils.NormalizeAndValidateMac(macaddr)

        if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
                                     " modifying an existing NIC",
                                     errors.ECODE_INVAL)

  def CheckArguments(self):
    if not (self.op.nics or self.op.disks or self.op.disk_template or
            self.op.hvparams or self.op.beparams or self.op.os_name or
            self.op.osparams or self.op.offline is not None or
            self.op.runtime_mem or self.op.pnode or self.op.osparams_private or
            self.op.instance_communication is not None):
      raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)

    if self.op.hvparams:
      CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
                           "hypervisor", "instance", "cluster")

    self.op.disks = self._UpgradeDiskNicMods(
      "disk", self.op.disks, ht.TSetParamsMods(ht.TIDiskParams))
    self.op.nics = self._UpgradeDiskNicMods(
      "NIC", self.op.nics, ht.TSetParamsMods(ht.TINicParams))

    if self.op.disks and self.op.disk_template is not None:
      raise errors.OpPrereqError("Disk template conversion and other disk"
                                 " changes not supported at the same time",
                                 errors.ECODE_INVAL)

    if (self.op.disk_template and
        self.op.disk_template in constants.DTS_INT_MIRROR and
        self.op.remote_node is None):
      raise errors.OpPrereqError("Changing the disk template to a mirrored"
                                 " one requires specifying a secondary node",
                                 errors.ECODE_INVAL)

    # Check NIC modifications
    self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
                    self._VerifyNicModification)

    if self.op.pnode:
      (self.op.pnode_uuid, self.op.pnode) = \
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODEGROUP] = []
    # Can't even acquire node locks in shared mode as upcoming changes in
    # Ganeti 2.6 will start to modify the node object on disk conversion
    self.needed_locks[locking.LEVEL_NODE] = []
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
    # Lock the node group to look up the ipolicy
    self.share_locks[locking.LEVEL_NODEGROUP] = 1

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
      # Acquire locks for the instance's nodegroups optimistically. Needs
      # to be verified in CheckPrereq
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
        self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
    elif level == locking.LEVEL_NODE:
      self._LockInstancesNodes()
      if self.op.disk_template and self.op.remote_node:
        (self.op.remote_node_uuid, self.op.remote_node) = \
          ExpandNodeUuidAndName(self.cfg, self.op.remote_node_uuid,
                                self.op.remote_node)
        self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node_uuid)
    elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, primary and secondaries.

    """
    args = {}
    if constants.BE_MINMEM in self.be_new:
      args["minmem"] = self.be_new[constants.BE_MINMEM]
    if constants.BE_MAXMEM in self.be_new:
      args["maxmem"] = self.be_new[constants.BE_MAXMEM]
    if constants.BE_VCPUS in self.be_new:
      args["vcpus"] = self.be_new[constants.BE_VCPUS]
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
    # information at all.

    if self._new_nics is not None:
      nics = []

      for nic in self._new_nics:
        n = copy.deepcopy(nic)
        nicparams = self.cluster.SimpleFillNIC(n.nicparams)
        n.nicparams = nicparams
        nics.append(NICToTuple(self, n))

      args["nics"] = nics

    env = BuildInstanceHookEnvByObject(self, self.instance, override=args)
    if self.op.disk_template:
      env["NEW_DISK_TEMPLATE"] = self.op.disk_template
    if self.op.runtime_mem:
      env["RUNTIME_MEMORY"] = self.op.runtime_mem

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + \
        list(self.cfg.GetInstanceNodes(self.instance))
    return (nl, nl)

  def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
                              old_params, cluster, pnode_uuid):

    update_params_dict = dict([(key, params[key])
                               for key in constants.NICS_PARAMETERS
                               if key in params])

    req_link = update_params_dict.get(constants.NIC_LINK, None)
    req_mode = update_params_dict.get(constants.NIC_MODE, None)

    new_net_uuid = None
    new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid)
    if new_net_uuid_or_name:
      new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name)
      new_net_obj = self.cfg.GetNetwork(new_net_uuid)

    if old_net_uuid:
      old_net_obj = self.cfg.GetNetwork(old_net_uuid)

    if new_net_uuid:
      netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode_uuid)
      if not netparams:
        raise errors.OpPrereqError("No netparams found for the network"
                                   " %s, probably not connected" %
                                   new_net_obj.name, errors.ECODE_INVAL)
      new_params = dict(netparams)
    else:
      new_params = GetUpdatedParams(old_params, update_params_dict)

    utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)

    new_filled_params = cluster.SimpleFillNIC(new_params)
    objects.NIC.CheckParameterSyntax(new_filled_params)

    new_mode = new_filled_params[constants.NIC_MODE]
    if new_mode == constants.NIC_MODE_BRIDGED:
      bridge = new_filled_params[constants.NIC_LINK]
      msg = self.rpc.call_bridges_exist(pnode_uuid, [bridge]).fail_msg
      if msg:
        msg = "Error checking bridges on node '%s': %s" % \
                (self.cfg.GetNodeName(pnode_uuid), msg)
        if self.op.force:
          self.warn.append(msg)
        else:
          raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)

    elif new_mode == constants.NIC_MODE_ROUTED:
      ip = params.get(constants.INIC_IP, old_ip)
      if ip is None:
        raise errors.OpPrereqError("Cannot set the NIC IP address to None"
                                   " on a routed NIC", errors.ECODE_INVAL)

    elif new_mode == constants.NIC_MODE_OVS:
      # TODO: check OVS link
      self.LogInfo("OVS links are currently not checked for correctness")

    if constants.INIC_MAC in params:
      mac = params[constants.INIC_MAC]
      if mac is None:
        raise errors.OpPrereqError("Cannot unset the NIC MAC address",
                                   errors.ECODE_INVAL)
      elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        # otherwise generate the MAC address
        params[constants.INIC_MAC] = \
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
      else:
        # or validate/reserve the current one
        try:
          self.cfg.ReserveMAC(mac, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("MAC address '%s' already in use"
                                     " in cluster" % mac,
                                     errors.ECODE_NOTUNIQUE)
    elif new_net_uuid != old_net_uuid:

      def get_net_prefix(net_uuid):
        mac_prefix = None
        if net_uuid:
          nobj = self.cfg.GetNetwork(net_uuid)
          mac_prefix = nobj.mac_prefix

        return mac_prefix

      new_prefix = get_net_prefix(new_net_uuid)
      old_prefix = get_net_prefix(old_net_uuid)
      if old_prefix != new_prefix:
        params[constants.INIC_MAC] = \
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())

    # if there is a change in (ip, network) tuple
    new_ip = params.get(constants.INIC_IP, old_ip)
    if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
      if new_ip:
        # if IP is pool then require a network and generate one IP
        if new_ip.lower() == constants.NIC_IP_POOL:
          if new_net_uuid:
            try:
              new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("Unable to get a free IP"
                                         " from the address pool",
                                         errors.ECODE_STATE)
            self.LogInfo("Chose IP %s from network %s",
                         new_ip,
                         new_net_obj.name)
            params[constants.INIC_IP] = new_ip
          else:
            raise errors.OpPrereqError("ip=pool, but no network found",
                                       errors.ECODE_INVAL)
        # otherwise reserve the new IP in the new network, if any
        elif new_net_uuid:
          try:
            self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId(),
                               check=self.op.conflicts_check)
            self.LogInfo("Reserving IP %s in network %s",
                         new_ip, new_net_obj.name)
          except errors.ReservationError:
            raise errors.OpPrereqError("IP %s not available in network %s" %
                                       (new_ip, new_net_obj.name),
                                       errors.ECODE_NOTUNIQUE)
        # new network is None so check if new IP is a conflicting IP
        elif self.op.conflicts_check:
          _CheckForConflictingIp(self, new_ip, pnode_uuid)

      # release old IP if old network is not None
      if old_ip and old_net_uuid:
        try:
          self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId())
        except errors.AddressPoolError:
          logging.warning("Release IP %s not contained in network %s",
                          old_ip, old_net_obj.name)

    # there are no changes in (ip, network) tuple and old network is not None
    elif (old_net_uuid is not None and
          (req_link is not None or req_mode is not None)):
      raise errors.OpPrereqError("Not allowed to change link or mode of"
                                 " a NIC that is connected to a network",
                                 errors.ECODE_INVAL)

    private.params = new_params
    private.filled = new_filled_params

  def _PreCheckDiskTemplate(self, pnode_info):
    """CheckPrereq checks related to a new disk template."""
    # Arguments are passed to avoid configuration lookups
    pnode_uuid = self.instance.primary_node
    if self.instance.disk_template == self.op.disk_template:
      raise errors.OpPrereqError("Instance already has disk template %s" %
                                 self.instance.disk_template,
                                 errors.ECODE_INVAL)

    if not self.cluster.IsDiskTemplateEnabled(self.op.disk_template):
      raise errors.OpPrereqError("Disk template '%s' is not enabled for this"
                                 " cluster." % self.op.disk_template)

    if (self.instance.disk_template,
        self.op.disk_template) not in self._DISK_CONVERSIONS:
      raise errors.OpPrereqError("Unsupported disk template conversion from"
                                 " %s to %s" % (self.instance.disk_template,
                                                self.op.disk_template),
                                 errors.ECODE_INVAL)
    CheckInstanceState(self, self.instance, INSTANCE_DOWN,
                       msg="cannot change disk template")
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.remote_node_uuid == pnode_uuid:
        raise errors.OpPrereqError("Given new secondary node %s is the same"
                                   " as the primary node of the instance" %
                                   self.op.remote_node, errors.ECODE_STATE)
      CheckNodeOnline(self, self.op.remote_node_uuid)
      CheckNodeNotDrained(self, self.op.remote_node_uuid)
      # FIXME: here we assume that the old instance type is DT_PLAIN
      assert self.instance.disk_template == constants.DT_PLAIN
      disks = [{constants.IDISK_SIZE: d.size,
                constants.IDISK_VG: d.logical_id[0]}
               for d in self.instance.disks]
      required = ComputeDiskSizePerVG(self.op.disk_template, disks)
      CheckNodesFreeDiskPerVG(self, [self.op.remote_node_uuid], required)

      snode_info = self.cfg.GetNodeInfo(self.op.remote_node_uuid)
      snode_group = self.cfg.GetNodeGroup(snode_info.group)
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
                                                              snode_group)
      CheckTargetNodeIPolicy(self, ipolicy, self.instance, snode_info, self.cfg,
                             ignore=self.op.ignore_ipolicy)
      if pnode_info.group != snode_info.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    if not self.op.disk_template in constants.DTS_EXCL_STORAGE:
      # Make sure none of the nodes require exclusive storage
      nodes = [pnode_info]
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        assert snode_info
        nodes.append(snode_info)
      has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
      if compat.any(map(has_es, nodes)):
        errmsg = ("Cannot convert disk template from %s to %s when exclusive"
                  " storage is enabled" % (self.instance.disk_template,
                                           self.op.disk_template))
        raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)

  def _PreCheckDisks(self, ispec):
    """CheckPrereq checks related to disk changes.

    @type ispec: dict
    @param ispec: instance specs to be updated with the new disks

    """
    self.diskparams = self.cfg.GetInstanceDiskParams(self.instance)

    inst_nodes = self.cfg.GetInstanceNodes(self.instance)
    excl_stor = compat.any(
      rpc.GetExclusiveStorageForNodes(self.cfg, inst_nodes).values()
      )

    # Check disk modifications. This is done here and not in CheckArguments
    # (as with NICs), because we need to know the instance's disk template
    ver_fn = lambda op, par: self._VerifyDiskModification(op, par, excl_stor)
    if self.instance.disk_template == constants.DT_EXT:
      self._CheckMods("disk", self.op.disks, {}, ver_fn)
    else:
      self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
                      ver_fn)

    self.diskmod = _PrepareContainerMods(self.op.disks, None)

    # Check the validity of the `provider' parameter
    if self.instance.disk_template == constants.DT_EXT:
      for mod in self.diskmod:
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
        if mod[0] == constants.DDM_ADD:
          if ext_provider is None:
            raise errors.OpPrereqError("Instance template is '%s' and parameter"
                                       " '%s' missing, during disk add" %
                                       (constants.DT_EXT,
                                        constants.IDISK_PROVIDER),
                                       errors.ECODE_NOENT)
        elif mod[0] == constants.DDM_MODIFY:
          if ext_provider:
            raise errors.OpPrereqError("Parameter '%s' is invalid during disk"
                                       " modification" %
                                       constants.IDISK_PROVIDER,
                                       errors.ECODE_INVAL)
    else:
      for mod in self.diskmod:
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
        if ext_provider is not None:
          raise errors.OpPrereqError("Parameter '%s' is only valid for"
                                     " instances of type '%s'" %
                                     (constants.IDISK_PROVIDER,
                                      constants.DT_EXT),
                                     errors.ECODE_INVAL)

    if not self.op.wait_for_sync and self.instance.disks_active:
      for mod in self.diskmod:
        if mod[0] == constants.DDM_ADD:
          raise errors.OpPrereqError("Can't add a disk to an instance with"
                                     " activated disks and"
                                     " --no-wait-for-sync given.",
                                     errors.ECODE_INVAL)

    if self.op.disks and self.instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Disk operations not supported for"
                                 " diskless instances", errors.ECODE_INVAL)

    def _PrepareDiskMod(_, disk, params, __):
      disk.name = params.get(constants.IDISK_NAME, None)

    # Verify disk changes (operating on a copy)
    disks = copy.deepcopy(self.instance.disks)
    _ApplyContainerMods("disk", disks, None, self.diskmod, None,
                        _PrepareDiskMod, None)
    utils.ValidateDeviceNames("disk", disks)
    if len(disks) > constants.MAX_DISKS:
      raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
                                 " more" % constants.MAX_DISKS,
                                 errors.ECODE_STATE)
    disk_sizes = [disk.size for disk in self.instance.disks]
    disk_sizes.extend(params["size"] for (op, idx, params, private) in
                      self.diskmod if op == constants.DDM_ADD)
    ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
    ispec[constants.ISPEC_DISK_SIZE] = disk_sizes

    if self.op.offline is not None and self.op.offline:
      CheckInstanceState(self, self.instance, CAN_CHANGE_INSTANCE_OFFLINE,
                         msg="can't change to offline")

  @staticmethod
  def _InstanceCommunicationDDM(cfg, instance_communication, instance):
    """Create a NIC mod that adds or removes the instance
    communication NIC to or from a running instance.

    The NICs are dynamically created using the Dynamic Device
    Modification (DDM).  This function produces a NIC modification
    (mod) that either inserts an additional NIC meant for instance
    communication into a running instance or removes an existing
    instance communication NIC from it, using DDM.

    @type cfg: L{config.ConfigWriter}
    @param cfg: cluster configuration

    @type instance_communication: boolean
    @param instance_communication: whether instance communication is
                                   enabled or disabled

    @type instance: L{objects.Instance}
    @param instance: instance to which the NIC mod will be applied

    @rtype: (L{constants.DDM_ADD}, -1, parameters) or
            (L{constants.DDM_REMOVE}, -1, parameters) or
            L{None}
    @return: DDM mod containing an action to add or remove the NIC, or
             None if nothing needs to be done

    """
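    # Illustrative example (hypothetical instance name "inst1"; the NIC name
    # is whatever _ComputeInstanceCommunicationNIC computes for it): enabling
    # communication on an instance that lacks the NIC returns roughly
    #   (constants.DDM_ADD, -1, {constants.INIC_NAME: <computed name>,
    #                            constants.INIC_MAC: constants.VALUE_GENERATE,
    #                            constants.INIC_IP: constants.NIC_IP_POOL,
    #                            constants.INIC_NETWORK: <cluster network>})
    # while disabling it on an instance that has the NIC returns
    #   (constants.DDM_REMOVE, -1, None).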
    nic_name = _ComputeInstanceCommunicationNIC(instance.name)

    instance_communication_nic = None

    for nic in instance.nics:
      if nic.name == nic_name:
        instance_communication_nic = nic
        break

    if instance_communication and not instance_communication_nic:
      action = constants.DDM_ADD
      params = {constants.INIC_NAME: nic_name,
                constants.INIC_MAC: constants.VALUE_GENERATE,
                constants.INIC_IP: constants.NIC_IP_POOL,
                constants.INIC_NETWORK:
                  cfg.GetInstanceCommunicationNetwork()}
    elif not instance_communication and instance_communication_nic:
      action = constants.DDM_REMOVE
      params = None
    else:
      action = None
      params = None

    if action is not None:
      return (action, -1, params)
    else:
      return None

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the instance list against the existing names.

    """
    assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    self.cluster = self.cfg.GetClusterInfo()
    cluster_hvparams = self.cluster.hvparams[self.instance.hypervisor]

    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    pnode_uuid = self.instance.primary_node

    self.warn = []

    if (self.op.pnode_uuid is not None and self.op.pnode_uuid != pnode_uuid and
        not self.op.force):
      # verify that the instance is not up
      instance_info = self.rpc.call_instance_info(
          pnode_uuid, self.instance.name, self.instance.hypervisor,
          cluster_hvparams)
      if instance_info.fail_msg:
        self.warn.append("Can't get instance runtime information: %s" %
                         instance_info.fail_msg)
      elif instance_info.payload:
        raise errors.OpPrereqError("Instance is still running on %s" %
                                   self.cfg.GetNodeName(pnode_uuid),
                                   errors.ECODE_STATE)

    assert pnode_uuid in self.owned_locks(locking.LEVEL_NODE)
    node_uuids = list(self.cfg.GetInstanceNodes(self.instance))
    pnode_info = self.cfg.GetNodeInfo(pnode_uuid)

    #_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
    assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
    group_info = self.cfg.GetNodeGroup(pnode_info.group)

    # dictionary with instance information after the modification
    ispec = {}

    if self.op.hotplug or self.op.hotplug_if_possible:
      result = self.rpc.call_hotplug_supported(self.instance.primary_node,
                                               self.instance)
      if result.fail_msg:
        if self.op.hotplug:
          result.Raise("Hotplug is not possible: %s" % result.fail_msg,
                       prereq=True)
        else:
          self.LogWarning(result.fail_msg)
          self.op.hotplug = False
          self.LogInfo("Modification will take place without hotplugging.")
      else:
        self.op.hotplug = True

    # Prepare NIC modifications
    # add or remove NIC for instance communication
    if self.op.instance_communication is not None:
      mod = self._InstanceCommunicationDDM(self.cfg,
                                           self.op.instance_communication,
                                           self.instance)
      if mod is not None:
        self.op.nics.append(mod)

    self.nicmod = _PrepareContainerMods(self.op.nics, _InstNicModPrivate)

    # OS change
    if self.op.os_name and not self.op.force:
      CheckNodeHasOS(self, self.instance.primary_node, self.op.os_name,
                     self.op.force_variant)
      instance_os = self.op.os_name
    else:
      instance_os = self.instance.os

    assert not (self.op.disk_template and self.op.disks), \
      "Can't modify disk template and apply disk changes at the same time"

    if self.op.disk_template:
      self._PreCheckDiskTemplate(pnode_info)

    self._PreCheckDisks(ispec)

    # hvparams processing
    if self.op.hvparams:
      hv_type = self.instance.hypervisor
      i_hvdict = GetUpdatedParams(self.instance.hvparams, self.op.hvparams)
      utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
      hv_new = self.cluster.SimpleFillHV(hv_type, self.instance.os, i_hvdict)

      # local check
      hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
      CheckHVParams(self, node_uuids, self.instance.hypervisor, hv_new)
      self.hv_proposed = self.hv_new = hv_new # the new actual values
      self.hv_inst = i_hvdict # the new dict (without defaults)
    else:
      self.hv_proposed = self.cluster.SimpleFillHV(self.instance.hypervisor,
                                                   self.instance.os,
                                                   self.instance.hvparams)
      self.hv_new = self.hv_inst = {}

    # beparams processing
    if self.op.beparams:
      i_bedict = GetUpdatedParams(self.instance.beparams, self.op.beparams,
                                  use_none=True)
      objects.UpgradeBeParams(i_bedict)
      utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
      be_new = self.cluster.SimpleFillBE(i_bedict)
      self.be_proposed = self.be_new = be_new # the new actual values
      self.be_inst = i_bedict # the new dict (without defaults)
    else:
      self.be_new = self.be_inst = {}
      self.be_proposed = self.cluster.SimpleFillBE(self.instance.beparams)
    be_old = self.cluster.FillBE(self.instance)

    # CPU param validation -- checking every time a parameter is
    # changed to cover all cases where either CPU mask or vcpus have
    # changed
    if (constants.BE_VCPUS in self.be_proposed and
        constants.HV_CPU_MASK in self.hv_proposed):
      cpu_list = \
        utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
      # Verify mask is consistent with number of vCPUs. Can skip this
      # test if only 1 entry in the CPU mask, which means same mask
      # is applied to all vCPUs.
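      # For example (illustrative values): with vcpus=2, a mask like
      # "0-1:2-3" (one entry per vCPU) is consistent, while "0-1:2-3:4"
      # would not be; a single-entry mask like "0-3" applies to all vCPUs
      # and is always accepted.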
      if (len(cpu_list) > 1 and
          len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
        raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
                                   " CPU mask [%s]" %
                                   (self.be_proposed[constants.BE_VCPUS],
                                    self.hv_proposed[constants.HV_CPU_MASK]),
                                   errors.ECODE_INVAL)

      # Only perform this test if a new CPU mask is given
      if constants.HV_CPU_MASK in self.hv_new:
        # Calculate the largest CPU number requested
        max_requested_cpu = max(map(max, cpu_list))
        # Check that all of the instance's nodes have enough physical CPUs to
        # satisfy the requested CPU mask
        hvspecs = [(self.instance.hypervisor,
                    self.cfg.GetClusterInfo()
                      .hvparams[self.instance.hypervisor])]
        _CheckNodesPhysicalCPUs(self, self.cfg.GetInstanceNodes(self.instance),
                                max_requested_cpu + 1,
                                hvspecs)

    # osparams processing
    if self.op.osparams or self.op.osparams_private:
      public_parms = self.op.osparams or {}
      private_parms = self.op.osparams_private or {}
      dupe_keys = utils.GetRepeatedKeys(public_parms, private_parms)

      if dupe_keys:
        raise errors.OpPrereqError("OS parameters repeated multiple times: %s" %
                                   utils.CommaJoin(dupe_keys))

      self.os_inst = GetUpdatedParams(self.instance.osparams,
                                      public_parms)
      self.os_inst_private = GetUpdatedParams(self.instance.osparams_private,
                                              private_parms)

      CheckOSParams(self, True, node_uuids, instance_os,
                    objects.FillDict(self.os_inst,
                                     self.os_inst_private))

    else:
      self.os_inst = {}
      self.os_inst_private = {}

    #TODO(dynmem): do the appropriate check involving MINMEM
    if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
        be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
      mem_check_list = [pnode_uuid]
      if be_new[constants.BE_AUTO_BALANCE]:
        # either we changed auto_balance to yes or it was from before
        mem_check_list.extend(
          self.cfg.GetInstanceSecondaryNodes(self.instance))
      instance_info = self.rpc.call_instance_info(
          pnode_uuid, self.instance.name, self.instance.hypervisor,
          cluster_hvparams)
      hvspecs = [(self.instance.hypervisor,
                  cluster_hvparams)]
      nodeinfo = self.rpc.call_node_info(mem_check_list, None,
                                         hvspecs)
      pninfo = nodeinfo[pnode_uuid]
      msg = pninfo.fail_msg
      if msg:
        # Assume the primary node is unreachable and go ahead
        self.warn.append("Can't get info from primary node %s: %s" %
                         (self.cfg.GetNodeName(pnode_uuid), msg))
      else:
        (_, _, (pnhvinfo, )) = pninfo.payload
        if not isinstance(pnhvinfo.get("memory_free", None), int):
          self.warn.append("Node data from primary node %s doesn't contain"
                           " free memory information" %
                           self.cfg.GetNodeName(pnode_uuid))
        elif instance_info.fail_msg:
          self.warn.append("Can't get instance runtime information: %s" %
                           instance_info.fail_msg)
        else:
          if instance_info.payload:
            current_mem = int(instance_info.payload["memory"])
          else:
            # Assume instance not running
            # (there is a slight race condition here, but it's not very
            # probable, and we have no other way to check)
            # TODO: Describe race condition
            current_mem = 0
          #TODO(dynmem): do the appropriate check involving MINMEM
          miss_mem = (be_new[constants.BE_MAXMEM] - current_mem -
                      pnhvinfo["memory_free"])
          if miss_mem > 0:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from starting, due to %d MB of memory"
                                       " missing on its primary node" %
                                       miss_mem, errors.ECODE_NORES)

      if be_new[constants.BE_AUTO_BALANCE]:
        secondary_nodes = self.cfg.GetInstanceSecondaryNodes(self.instance)
        for node_uuid, nres in nodeinfo.items():
          if node_uuid not in secondary_nodes:
            continue
          nres.Raise("Can't get info from secondary node %s" %
                     self.cfg.GetNodeName(node_uuid), prereq=True,
                     ecode=errors.ECODE_STATE)
          (_, _, (nhvinfo, )) = nres.payload
          if not isinstance(nhvinfo.get("memory_free", None), int):
            raise errors.OpPrereqError("Secondary node %s didn't return free"
                                       " memory information" %
                                       self.cfg.GetNodeName(node_uuid),
                                       errors.ECODE_STATE)
          #TODO(dynmem): do the appropriate check involving MINMEM
          elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from failover to its secondary node"
                                       " %s, due to not enough memory" %
                                       self.cfg.GetNodeName(node_uuid),
                                       errors.ECODE_STATE)

    if self.op.runtime_mem:
      remote_info = self.rpc.call_instance_info(
         self.instance.primary_node, self.instance.name,
         self.instance.hypervisor,
         cluster_hvparams)
      remote_info.Raise("Error checking node %s" %
                        self.cfg.GetNodeName(self.instance.primary_node))
      if not remote_info.payload: # not running already
        raise errors.OpPrereqError("Instance %s is not running" %
                                   self.instance.name, errors.ECODE_STATE)

      current_memory = remote_info.payload["memory"]
      if (not self.op.force and
           (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
            self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
        raise errors.OpPrereqError("Instance %s must have memory between %d"
                                   " and %d MB of memory unless --force is"
                                   " given" %
                                   (self.instance.name,
                                    self.be_proposed[constants.BE_MINMEM],
                                    self.be_proposed[constants.BE_MAXMEM]),
                                   errors.ECODE_INVAL)

      delta = self.op.runtime_mem - current_memory
      if delta > 0:
        CheckNodeFreeMemory(
            self, self.instance.primary_node,
            "ballooning memory for instance %s" % self.instance.name, delta,
            self.instance.hypervisor,
            self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])

    # make self.cluster visible in the functions below
    cluster = self.cluster

    def _PrepareNicCreate(_, params, private):
      self._PrepareNicModification(params, private, None, None,
                                   {}, cluster, pnode_uuid)
      return (None, None)

    def _PrepareNicMod(_, nic, params, private):
      self._PrepareNicModification(params, private, nic.ip, nic.network,
                                   nic.nicparams, cluster, pnode_uuid)
      return None

    def _PrepareNicRemove(_, params, __):
      ip = params.ip
      net = params.network
      if net is not None and ip is not None:
        self.cfg.ReleaseIp(net, ip, self.proc.GetECId())

    # Verify NIC changes (operating on copy)
    nics = [nic.Copy() for nic in self.instance.nics]
    _ApplyContainerMods("NIC", nics, None, self.nicmod,
                        _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
    if len(nics) > constants.MAX_NICS:
      raise errors.OpPrereqError("Instance has too many network interfaces"
                                 " (%d), cannot add more" % constants.MAX_NICS,
                                 errors.ECODE_STATE)

    # Pre-compute NIC changes (necessary to use result in hooks)
    self._nic_chgdesc = []
    if self.nicmod:
      # Operate on copies as this is still in prereq
      nics = [nic.Copy() for nic in self.instance.nics]
      _ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
                          self._CreateNewNic, self._ApplyNicMods,
                          self._RemoveNic)
      # Verify that NIC names are unique and valid
      utils.ValidateDeviceNames("NIC", nics)
      self._new_nics = nics
      ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
    else:
      self._new_nics = None
      ispec[constants.ISPEC_NIC_COUNT] = len(self.instance.nics)

    if not self.op.ignore_ipolicy:
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
                                                              group_info)

      # Fill ispec with backend parameters
      ispec[constants.ISPEC_SPINDLE_USE] = \
        self.be_new.get(constants.BE_SPINDLE_USE, None)
      ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS,
                                                         None)

      # Copy ispec to verify parameters with min/max values separately
      if self.op.disk_template:
        new_disk_template = self.op.disk_template
      else:
        new_disk_template = self.instance.disk_template
      ispec_max = ispec.copy()
      ispec_max[constants.ISPEC_MEM_SIZE] = \
        self.be_new.get(constants.BE_MAXMEM, None)
      res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,
                                                     new_disk_template)
      ispec_min = ispec.copy()
      ispec_min[constants.ISPEC_MEM_SIZE] = \
        self.be_new.get(constants.BE_MINMEM, None)
      res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,
                                                     new_disk_template)

      if (res_max or res_min):
        # FIXME: Improve error message by including information about whether
        # the upper or lower limit of the parameter fails the ipolicy.
        msg = ("Instance allocation to group %s (%s) violates policy: %s" %
               (group_info, group_info.name,
                utils.CommaJoin(set(res_max + res_min))))
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

  def _ConvertPlainToDrbd(self, feedback_fn):
    """Converts an instance from plain to drbd.

    """
    feedback_fn("Converting template to drbd")
    pnode_uuid = self.instance.primary_node
    snode_uuid = self.op.remote_node_uuid

    assert self.instance.disk_template == constants.DT_PLAIN

    # create a fake disk info for _GenerateDiskTemplate
    disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
                  constants.IDISK_VG: d.logical_id[0],
                  constants.IDISK_NAME: d.name}
                 for d in self.instance.disks]
    new_disks = GenerateDiskTemplate(self, self.op.disk_template,
                                     self.instance.uuid, pnode_uuid,
                                     [snode_uuid], disk_info, None, None, 0,
                                     feedback_fn, self.diskparams)
    anno_disks = rpc.AnnotateDiskParams(new_disks, self.diskparams)
    p_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, pnode_uuid)
    s_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, snode_uuid)
    info = GetInstanceInfoText(self.instance)
    feedback_fn("Creating additional volumes...")
    # first, create the missing data and meta devices
    for disk in anno_disks:
      # unfortunately this is... not too nice
      CreateSingleBlockDev(self, pnode_uuid, self.instance, disk.children[1],
                           info, True, p_excl_stor)
      for child in disk.children:
        CreateSingleBlockDev(self, snode_uuid, self.instance, child, info, True,
                             s_excl_stor)
    # at this stage, all new LVs have been created, we can rename the
    # old ones
    feedback_fn("Renaming original volumes...")
    rename_list = [(o, n.children[0].logical_id)
                   for (o, n) in zip(self.instance.disks, new_disks)]
    result = self.rpc.call_blockdev_rename(pnode_uuid, rename_list)
    result.Raise("Failed to rename original LVs")

    feedback_fn("Initializing DRBD devices...")
    # all child devices are in place, we can now create the DRBD devices
    try:
      for disk in anno_disks:
        for (node_uuid, excl_stor) in [(pnode_uuid, p_excl_stor),
                                       (snode_uuid, s_excl_stor)]:
          f_create = node_uuid == pnode_uuid
          CreateSingleBlockDev(self, node_uuid, self.instance, disk, info,
                               f_create, excl_stor)
    except errors.GenericError, e:
      feedback_fn("Initializing of DRBD devices failed;"
                  " renaming back original volumes...")
      rename_back_list = [(n.children[0], o.logical_id)
                          for (n, o) in zip(new_disks, self.instance.disks)]
      result = self.rpc.call_blockdev_rename(pnode_uuid, rename_back_list)
      result.Raise("Failed to rename LVs back after error %s" % str(e))
      raise

    # at this point, the instance has been modified
    self.instance.disk_template = constants.DT_DRBD8
    self.instance.disks = new_disks
    self.cfg.Update(self.instance, feedback_fn)

    # Release node locks while waiting for sync
    ReleaseLocks(self, locking.LEVEL_NODE)

    # disks are created, waiting for sync
    disk_abort = not WaitForSync(self, self.instance,
                                 oneshot=not self.op.wait_for_sync)
    if disk_abort:
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance, please cleanup manually")

    # Node resource locks will be released by caller

  def _ConvertDrbdToPlain(self, feedback_fn):
    """Converts an instance from drbd to plain.

    """
    secondary_nodes = self.cfg.GetInstanceSecondaryNodes(self.instance)
    assert len(secondary_nodes) == 1
    assert self.instance.disk_template == constants.DT_DRBD8

    pnode_uuid = self.instance.primary_node
    snode_uuid = secondary_nodes[0]
    feedback_fn("Converting template to plain")

    old_disks = AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
    new_disks = [d.children[0] for d in self.instance.disks]

    # copy over size, mode and name
    for parent, child in zip(old_disks, new_disks):
      child.size = parent.size
      child.mode = parent.mode
      child.name = parent.name

    # this is a DRBD disk, return its port to the pool
    # NOTE: this must be done right before the call to cfg.Update!
    for disk in old_disks:
      tcp_port = disk.logical_id[2]
      self.cfg.AddTcpUdpPort(tcp_port)

    # update instance structure
    self.instance.disks = new_disks
    self.instance.disk_template = constants.DT_PLAIN
    _UpdateIvNames(0, self.instance.disks)
    self.cfg.Update(self.instance, feedback_fn)

    # Release locks in case removing disks takes a while
    ReleaseLocks(self, locking.LEVEL_NODE)

    feedback_fn("Removing volumes on the secondary node...")
    for disk in old_disks:
      result = self.rpc.call_blockdev_remove(snode_uuid, (disk, self.instance))
      result.Warn("Could not remove block device %s on node %s,"
                  " continuing anyway" %
                  (disk.iv_name, self.cfg.GetNodeName(snode_uuid)),
                  self.LogWarning)

    feedback_fn("Removing unneeded volumes on the primary node...")
    for idx, disk in enumerate(old_disks):
      meta = disk.children[1]
      result = self.rpc.call_blockdev_remove(pnode_uuid, (meta, self.instance))
      result.Warn("Could not remove metadata for disk %d on node %s,"
                  " continuing anyway" %
                  (idx, self.cfg.GetNodeName(pnode_uuid)),
                  self.LogWarning)

  def _HotplugDevice(self, action, dev_type, device, extra, seq):
    self.LogInfo("Trying to hotplug device...")
    msg = "hotplug:"
    result = self.rpc.call_hotplug_device(self.instance.primary_node,
                                          self.instance, action, dev_type,
                                          (device, self.instance),
                                          extra, seq)
    if result.fail_msg:
      self.LogWarning("Could not hotplug device: %s" % result.fail_msg)
      self.LogInfo("Continuing execution..")
      msg += "failed"
    else:
      self.LogInfo("Hotplug done.")
      msg += "done"
    return msg

  def _CreateNewDisk(self, idx, params, _):
    """Creates a new disk.

    """
    # add a new disk
    if self.instance.disk_template in constants.DTS_FILEBASED:
      (file_driver, file_path) = self.instance.disks[0].logical_id
      file_path = os.path.dirname(file_path)
    else:
      file_driver = file_path = None

    secondary_nodes = self.cfg.GetInstanceSecondaryNodes(self.instance)
    disk = \
      GenerateDiskTemplate(self, self.instance.disk_template,
                           self.instance.uuid, self.instance.primary_node,
                           secondary_nodes, [params], file_path,
                           file_driver, idx, self.Log, self.diskparams)[0]

    new_disks = CreateDisks(self, self.instance, disks=[disk])

    if self.cluster.prealloc_wipe_disks:
      # Wipe new disk
      WipeOrCleanupDisks(self, self.instance,
                         disks=[(idx, disk, 0)],
                         cleanup=new_disks)

    changes = [
      ("disk/%d" % idx,
       "add:size=%s,mode=%s" % (disk.size, disk.mode)),
      ]
    if self.op.hotplug:
      result = self.rpc.call_blockdev_assemble(self.instance.primary_node,
                                               (disk, self.instance),
                                               self.instance.name, True, idx)
      if result.fail_msg:
        changes.append(("disk/%d" % idx, "assemble:failed"))
        self.LogWarning("Can't assemble newly created disk %d: %s",
                        idx, result.fail_msg)
      else:
        _, link_name = result.payload
        msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
                                  constants.HOTPLUG_TARGET_DISK,
                                  disk, link_name, idx)
        changes.append(("disk/%d" % idx, msg))

    return (disk, changes)

  def _PostAddDisk(self, _, disk):
    if not WaitForSync(self, self.instance, disks=[disk],
                       oneshot=not self.op.wait_for_sync):
      raise errors.OpExecError("Failed to sync disks of %s" %
                               self.instance.name)

    # the disk is active at this point, so deactivate it if the instance disks
    # are supposed to be inactive
    if not self.instance.disks_active:
      ShutdownInstanceDisks(self, self.instance, disks=[disk])

  def _ModifyDisk(self, idx, disk, params, _):
    """Modifies a disk.

    """
    changes = []
    if constants.IDISK_MODE in params:
      disk.mode = params.get(constants.IDISK_MODE)
      changes.append(("disk.mode/%d" % idx, disk.mode))

    if constants.IDISK_NAME in params:
      disk.name = params.get(constants.IDISK_NAME)
      changes.append(("disk.name/%d" % idx, disk.name))

    # Modify arbitrary params in case instance template is ext
    for key, value in params.iteritems():
      if (key not in constants.MODIFIABLE_IDISK_PARAMS and
          self.instance.disk_template == constants.DT_EXT):
        # stolen from GetUpdatedParams: default means reset/delete
        if value.lower() == constants.VALUE_DEFAULT:
          try:
            del disk.params[key]
          except KeyError:
            pass
        else:
          disk.params[key] = value
        changes.append(("disk.params:%s/%d" % (key, idx), value))

    return changes

  def _RemoveDisk(self, idx, root, _):
    """Removes a disk.

    """
    hotmsg = ""
    if self.op.hotplug:
      hotmsg = self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
                                   constants.HOTPLUG_TARGET_DISK,
                                   root, None, idx)
      ShutdownInstanceDisks(self, self.instance, [root])

    (anno_disk,) = AnnotateDiskParams(self.instance, [root], self.cfg)
    for node_uuid, disk in anno_disk.ComputeNodeTree(
                             self.instance.primary_node):
      msg = self.rpc.call_blockdev_remove(node_uuid, (disk, self.instance)) \
              .fail_msg
      if msg:
        self.LogWarning("Could not remove disk/%d on node '%s': %s,"
                        " continuing anyway", idx,
                        self.cfg.GetNodeName(node_uuid), msg)

    # if this is a DRBD disk, return its port to the pool
    if root.dev_type in constants.DTS_DRBD:
      self.cfg.AddTcpUdpPort(root.logical_id[2])

    return hotmsg

  def _CreateNewNic(self, idx, params, private):
    """Creates data structure for a new network interface.

    """
    mac = params[constants.INIC_MAC]
    ip = params.get(constants.INIC_IP, None)
    net = params.get(constants.INIC_NETWORK, None)
    name = params.get(constants.INIC_NAME, None)
    net_uuid = self.cfg.LookupNetwork(net)
    #TODO: not private.filled?? can a nic have no nicparams??
    nicparams = private.filled
    nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name,
                       nicparams=nicparams)
    nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())

    changes = [
      ("nic.%d" % idx,
       "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
       (mac, ip, private.filled[constants.NIC_MODE],
       private.filled[constants.NIC_LINK], net)),
      ]

    if self.op.hotplug:
      msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
                                constants.HOTPLUG_TARGET_NIC,
                                nobj, None, idx)
      changes.append(("nic.%d" % idx, msg))

    return (nobj, changes)

  def _ApplyNicMods(self, idx, nic, params, private):
    """Modifies a network interface.

    """
    changes = []

    for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:
      if key in params:
        changes.append(("nic.%s/%d" % (key, idx), params[key]))
        setattr(nic, key, params[key])

    new_net = params.get(constants.INIC_NETWORK, nic.network)
    new_net_uuid = self.cfg.LookupNetwork(new_net)
    if new_net_uuid != nic.network:
      changes.append(("nic.network/%d" % idx, new_net))
      nic.network = new_net_uuid

    if private.filled:
      nic.nicparams = private.filled

      for (key, val) in nic.nicparams.items():
        changes.append(("nic.%s/%d" % (key, idx), val))

    if self.op.hotplug:
      msg = self._HotplugDevice(constants.HOTPLUG_ACTION_MODIFY,
                                constants.HOTPLUG_TARGET_NIC,
                                nic, None, idx)
      changes.append(("nic/%d" % idx, msg))

    return changes

  def _RemoveNic(self, idx, nic, _):
    if self.op.hotplug:
      return self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
                                 constants.HOTPLUG_TARGET_NIC,
                                 nic, None, idx)

  def Exec(self, feedback_fn):
3650
    """Modifies an instance.
3651

3652
    All parameters take effect only at the next restart of the instance.
3653

3654
    """
3655
    # Process here the warnings from CheckPrereq, as we don't have a
3656
    # feedback_fn there.
3657
    # TODO: Replace with self.LogWarning
3658
    for warn in self.warn:
3659
      feedback_fn("WARNING: %s" % warn)
3660

    
3661
    assert ((self.op.disk_template is None) ^
3662
            bool(self.owned_locks(locking.LEVEL_NODE_RES))), \
3663
      "Not owning any node resource locks"
3664

    
3665
    result = []
3666

    
3667
    # New primary node
3668
    if self.op.pnode_uuid:
3669
      self.instance.primary_node = self.op.pnode_uuid
3670

    
3671
    # runtime memory
3672
    if self.op.runtime_mem:
3673
      rpcres = self.rpc.call_instance_balloon_memory(self.instance.primary_node,
3674
                                                     self.instance,
3675
                                                     self.op.runtime_mem)
3676
      rpcres.Raise("Cannot modify instance runtime memory")
3677
      result.append(("runtime_memory", self.op.runtime_mem))
3678

    
3679
    # Apply disk changes
3680
    _ApplyContainerMods("disk", self.instance.disks, result, self.diskmod,
3681
                        self._CreateNewDisk, self._ModifyDisk,
3682
                        self._RemoveDisk, post_add_fn=self._PostAddDisk)
3683
    _UpdateIvNames(0, self.instance.disks)
3684

    
3685
    if self.op.disk_template:
      if __debug__:
        check_nodes = set(self.cfg.GetInstanceNodes(self.instance))
        if self.op.remote_node_uuid:
          check_nodes.add(self.op.remote_node_uuid)
        for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
          owned = self.owned_locks(level)
          assert not (check_nodes - owned), \
            ("Not owning the correct locks, owning %r, expected at least %r" %
             (owned, check_nodes))

      r_shut = ShutdownInstanceDisks(self, self.instance)
      if not r_shut:
        raise errors.OpExecError("Cannot shutdown instance disks, unable to"
                                 " proceed with disk template conversion")
      mode = (self.instance.disk_template, self.op.disk_template)
      try:
        self._DISK_CONVERSIONS[mode](self, feedback_fn)
      except:
        self.cfg.ReleaseDRBDMinors(self.instance.uuid)
        raise
      result.append(("disk_template", self.op.disk_template))

      assert self.instance.disk_template == self.op.disk_template, \
        ("Expected disk template '%s', found '%s'" %
         (self.op.disk_template, self.instance.disk_template))

    # Release node and resource locks if there are any (they might already have
    # been released during disk conversion)
    ReleaseLocks(self, locking.LEVEL_NODE)
    ReleaseLocks(self, locking.LEVEL_NODE_RES)

    # Apply NIC changes
    if self._new_nics is not None:
      self.instance.nics = self._new_nics
      result.extend(self._nic_chgdesc)

    # hvparams changes
    if self.op.hvparams:
      self.instance.hvparams = self.hv_inst
      for key, val in self.op.hvparams.iteritems():
        result.append(("hv/%s" % key, val))

    # beparams changes
    if self.op.beparams:
      self.instance.beparams = self.be_inst
      for key, val in self.op.beparams.iteritems():
        result.append(("be/%s" % key, val))

    # OS change
    if self.op.os_name:
      self.instance.os = self.op.os_name

    # osparams changes
    if self.op.osparams:
      self.instance.osparams = self.os_inst
      for key, val in self.op.osparams.iteritems():
        result.append(("os/%s" % key, val))

    if self.op.osparams_private:
      self.instance.osparams_private = self.os_inst_private
      for key, val in self.op.osparams_private.iteritems():
        # Show the Private(...) blurb.
        result.append(("os_private/%s" % key, repr(val)))

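    # Apply the requested change to the administrative state, if any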
    if self.op.offline is None:
      # Ignore
      pass
    elif self.op.offline:
      # Mark instance as offline
      self.cfg.MarkInstanceOffline(self.instance.uuid)
      result.append(("admin_state", constants.ADMINST_OFFLINE))
    else:
      # Mark instance as online, but stopped
      self.cfg.MarkInstanceDown(self.instance.uuid)
      result.append(("admin_state", constants.ADMINST_DOWN))

    self.cfg.Update(self.instance, feedback_fn, self.proc.GetECId())

    assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
                self.owned_locks(locking.LEVEL_NODE)), \
      "All node locks should have been released by now"

    return result

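  # Supported disk template conversions: maps (current template, requested
  # template) to the conversion method. Unsupported combinations are
  # rejected earlier, during the prerequisite checks.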
  _DISK_CONVERSIONS = {
    (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
    (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
    }


class LUInstanceChangeGroup(LogicalUnit):
  HPATH = "instance-change-group"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self.share_locks = ShareAll()

    self.needed_locks = {
      locking.LEVEL_NODEGROUP: [],
      locking.LEVEL_NODE: [],
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
      }

    self._ExpandAndLockInstance()

    if self.op.target_groups:
      self.req_target_uuids = map(self.cfg.LookupNodeGroup,
                                  self.op.target_groups)
    else:
      self.req_target_uuids = None

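    # Fall back to the cluster's default iallocator if the opcode did not
    # specify one explicitly.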
    self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]

      if self.req_target_uuids:
        lock_groups = set(self.req_target_uuids)

        # Lock all groups used by the instance optimistically; this requires
        # going via the node before it's locked, so the result needs to be
        # verified again later on
        instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
        lock_groups.update(instance_groups)
      else:
        # No target groups, need to lock all of them
        lock_groups = locking.ALL_SET

      self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups

    elif level == locking.LEVEL_NODE:
      if self.req_target_uuids:
        # Lock all nodes used by instances
        self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
        self._LockInstancesNodes()

        # Lock all nodes in all potential target groups
        lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
                       self.cfg.GetInstanceNodeGroups(self.op.instance_uuid))
        member_nodes = [node_uuid
                        for group in lock_groups
                        for node_uuid in self.cfg.GetNodeGroup(group).members]
        self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
      else:
        # Lock all nodes as all groups are potential targets
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    owned_instance_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))

    assert (self.req_target_uuids is None or
            owned_groups.issuperset(self.req_target_uuids))
    assert owned_instance_names == set([self.op.instance_name])

    # Get instance information
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)

    # Check if the node groups of the locked instance are still correct
    assert owned_nodes.issuperset(self.cfg.GetInstanceNodes(self.instance)), \
      ("Instance %s's nodes changed while we kept the lock" %
       self.op.instance_name)

    inst_groups = CheckInstanceNodeGroups(self.cfg, self.op.instance_uuid,
                                          owned_groups)

    if self.req_target_uuids:
      # User requested specific target groups
      self.target_uuids = frozenset(self.req_target_uuids)
    else:
      # All groups except those used by the instance are potential targets
      self.target_uuids = owned_groups - inst_groups

    conflicting_groups = self.target_uuids & inst_groups
    if conflicting_groups:
      raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
                                 " used by the instance '%s'" %
                                 (utils.CommaJoin(conflicting_groups),
                                  self.op.instance_name),
                                 errors.ECODE_INVAL)

    if not self.target_uuids:
      raise errors.OpPrereqError("There are no possible target groups",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    assert self.target_uuids

    env = {
      "TARGET_GROUPS": " ".join(self.target_uuids),
      }

    env.update(BuildInstanceHookEnvByObject(self, self.instance))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()
    return ([mn], [mn])

  def Exec(self, feedback_fn):
    instances = list(self.owned_locks(locking.LEVEL_INSTANCE))

    assert instances == [self.op.instance_name], "Instance not locked"

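    # Ask the iallocator for a placement that moves the instance to one of
    # the requested target groups.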
    req = iallocator.IAReqGroupChange(instances=instances,
                                      target_groups=list(self.target_uuids))
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute solution for changing group of"
                                 " instance '%s' using iallocator '%s': %s" %
                                 (self.op.instance_name, self.op.iallocator,
                                  ial.info), errors.ECODE_NORES)

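    # Convert the iallocator result into job definitions; these are returned
    # to the caller and run as separate jobs instead of being executed here.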
    jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False)

    self.LogInfo("Iallocator returned %s job(s) for changing group of"
                 " instance '%s'", len(jobs), self.op.instance_name)

    return ResultWithJobs(jobs)