Statistics
| Branch: | Tag: | Revision:

root / lib / cmdlib / instance.py @ a09639d1

History | View | Annotate | Download (145.9 kB)

1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Logical units dealing with instances."""
23

    
24
import OpenSSL
25
import copy
26
import logging
27
import os
28

    
29
from ganeti import compat
30
from ganeti import constants
31
from ganeti import errors
32
from ganeti import ht
33
from ganeti import hypervisor
34
from ganeti import locking
35
from ganeti.masterd import iallocator
36
from ganeti import masterd
37
from ganeti import netutils
38
from ganeti import objects
39
from ganeti import pathutils
40
from ganeti import rpc
41
from ganeti import utils
42

    
43
from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs
44

    
45
from ganeti.cmdlib.common import INSTANCE_DOWN, \
46
  INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, CheckNodeOnline, \
47
  ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
48
  LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
49
  IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
50
  AnnotateDiskParams, GetUpdatedParams, ExpandInstanceUuidAndName, \
51
  ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeUuidAndName, \
52
  CheckDiskTemplateEnabled, IsValidDiskAccessModeCombination
53
from ganeti.cmdlib.instance_storage import CreateDisks, \
54
  CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, WaitForSync, \
55
  IsExclusiveStorageEnabledNodeUuid, CreateSingleBlockDev, ComputeDisks, \
56
  CheckRADOSFreeSpace, ComputeDiskSizePerVG, GenerateDiskTemplate, \
57
  StartInstanceDisks, ShutdownInstanceDisks, AssembleInstanceDisks, \
58
  CheckSpindlesExclusiveStorage
59
from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
60
  GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
61
  NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
62
  ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
63
  GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
64
  CheckInstanceBridgesExist, CheckNicsBridgesExist, CheckNodeHasOS
65

    
66
import ganeti.masterd.instance
67

    
68

    
69
#: Type description for changes as returned by L{_ApplyContainerMods}'s
70
#: callbacks
71
_TApplyContModsCbChanges = \
72
  ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
73
    ht.TNonEmptyString,
74
    ht.TAny,
75
    ])))
76

    
77

    
78
def _CheckHostnameSane(lu, name):
79
  """Ensures that a given hostname resolves to a 'sane' name.
80

81
  The given name is required to be a prefix of the resolved hostname,
82
  to prevent accidental mismatches.
83

84
  @param lu: the logical unit on behalf of which we're checking
85
  @param name: the name we should resolve and check
86
  @return: the resolved hostname object
87

88
  """
89
  hostname = netutils.GetHostname(name=name)
90
  if hostname.name != name:
91
    lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name)
92
  if not utils.MatchNameComponent(name, [hostname.name]):
93
    raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
94
                                " same as given hostname '%s'") %
95
                               (hostname.name, name), errors.ECODE_INVAL)
96
  return hostname
97

    
98

    
99
def _CheckOpportunisticLocking(op):
100
  """Generate error if opportunistic locking is not possible.
101

102
  """
103
  if op.opportunistic_locking and not op.iallocator:
104
    raise errors.OpPrereqError("Opportunistic locking is only available in"
105
                               " combination with an instance allocator",
106
                               errors.ECODE_INVAL)
107

    
108

    
109
def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_name_whitelist):
110
  """Wrapper around IAReqInstanceAlloc.
111

112
  @param op: The instance opcode
113
  @param disks: The computed disks
114
  @param nics: The computed nics
115
  @param beparams: The full filled beparams
116
  @param node_name_whitelist: List of nodes which should appear as online to the
117
    allocator (unless the node is already marked offline)
118

119
  @returns: A filled L{iallocator.IAReqInstanceAlloc}
120

121
  """
122
  spindle_use = beparams[constants.BE_SPINDLE_USE]
123
  return iallocator.IAReqInstanceAlloc(name=op.instance_name,
124
                                       disk_template=op.disk_template,
125
                                       tags=op.tags,
126
                                       os=op.os_type,
127
                                       vcpus=beparams[constants.BE_VCPUS],
128
                                       memory=beparams[constants.BE_MAXMEM],
129
                                       spindle_use=spindle_use,
130
                                       disks=disks,
131
                                       nics=[n.ToDict() for n in nics],
132
                                       hypervisor=op.hypervisor,
133
                                       node_whitelist=node_name_whitelist)
134

    
135

    
136
def _ComputeFullBeParams(op, cluster):
137
  """Computes the full beparams.
138

139
  @param op: The instance opcode
140
  @param cluster: The cluster config object
141

142
  @return: The fully filled beparams
143

144
  """
145
  default_beparams = cluster.beparams[constants.PP_DEFAULT]
146
  for param, value in op.beparams.iteritems():
147
    if value == constants.VALUE_AUTO:
148
      op.beparams[param] = default_beparams[param]
149
  objects.UpgradeBeParams(op.beparams)
150
  utils.ForceDictType(op.beparams, constants.BES_PARAMETER_TYPES)
151
  return cluster.SimpleFillBE(op.beparams)
152

    
153

    
154
def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
155
  """Computes the nics.
156

157
  @param op: The instance opcode
158
  @param cluster: Cluster configuration object
159
  @param default_ip: The default ip to assign
160
  @param cfg: An instance of the configuration object
161
  @param ec_id: Execution context ID
162

163
  @returns: The build up nics
164

165
  """
166
  nics = []
167
  for nic in op.nics:
168
    nic_mode_req = nic.get(constants.INIC_MODE, None)
169
    nic_mode = nic_mode_req
170
    if nic_mode is None or nic_mode == constants.VALUE_AUTO:
171
      nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]
172

    
173
    net = nic.get(constants.INIC_NETWORK, None)
174
    link = nic.get(constants.NIC_LINK, None)
175
    ip = nic.get(constants.INIC_IP, None)
176
    vlan = nic.get(constants.INIC_VLAN, None)
177

    
178
    if net is None or net.lower() == constants.VALUE_NONE:
179
      net = None
180
    else:
181
      if nic_mode_req is not None or link is not None:
182
        raise errors.OpPrereqError("If network is given, no mode or link"
183
                                   " is allowed to be passed",
184
                                   errors.ECODE_INVAL)
185

    
186
    if vlan is not None and nic_mode != constants.NIC_MODE_OVS:
187
      raise errors.OpPrereqError("VLAN is given, but network mode is not"
188
                                 " openvswitch", errors.ECODE_INVAL)
189

    
190
    # ip validity checks
191
    if ip is None or ip.lower() == constants.VALUE_NONE:
192
      nic_ip = None
193
    elif ip.lower() == constants.VALUE_AUTO:
194
      if not op.name_check:
195
        raise errors.OpPrereqError("IP address set to auto but name checks"
196
                                   " have been skipped",
197
                                   errors.ECODE_INVAL)
198
      nic_ip = default_ip
199
    else:
200
      # We defer pool operations until later, so that the iallocator has
201
      # filled in the instance's node(s) dimara
202
      if ip.lower() == constants.NIC_IP_POOL:
203
        if net is None:
204
          raise errors.OpPrereqError("if ip=pool, parameter network"
205
                                     " must be passed too",
206
                                     errors.ECODE_INVAL)
207

    
208
      elif not netutils.IPAddress.IsValid(ip):
209
        raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
210
                                   errors.ECODE_INVAL)
211

    
212
      nic_ip = ip
213

    
214
    # TODO: check the ip address for uniqueness
215
    if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
216
      raise errors.OpPrereqError("Routed nic mode requires an ip address",
217
                                 errors.ECODE_INVAL)
218

    
219
    # MAC address verification
220
    mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
221
    if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
222
      mac = utils.NormalizeAndValidateMac(mac)
223

    
224
      try:
225
        # TODO: We need to factor this out
226
        cfg.ReserveMAC(mac, ec_id)
227
      except errors.ReservationError:
228
        raise errors.OpPrereqError("MAC address %s already in use"
229
                                   " in cluster" % mac,
230
                                   errors.ECODE_NOTUNIQUE)
231

    
232
    #  Build nic parameters
233
    nicparams = {}
234
    if nic_mode_req:
235
      nicparams[constants.NIC_MODE] = nic_mode
236
    if link:
237
      nicparams[constants.NIC_LINK] = link
238
    if vlan:
239
      nicparams[constants.NIC_VLAN] = vlan
240

    
241
    check_params = cluster.SimpleFillNIC(nicparams)
242
    objects.NIC.CheckParameterSyntax(check_params)
243
    net_uuid = cfg.LookupNetwork(net)
244
    name = nic.get(constants.INIC_NAME, None)
245
    if name is not None and name.lower() == constants.VALUE_NONE:
246
      name = None
247
    nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name,
248
                          network=net_uuid, nicparams=nicparams)
249
    nic_obj.uuid = cfg.GenerateUniqueID(ec_id)
250
    nics.append(nic_obj)
251

    
252
  return nics
253

    
254

    
255
def _CheckForConflictingIp(lu, ip, node_uuid):
256
  """In case of conflicting IP address raise error.
257

258
  @type ip: string
259
  @param ip: IP address
260
  @type node_uuid: string
261
  @param node_uuid: node UUID
262

263
  """
264
  (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node_uuid)
265
  if conf_net is not None:
266
    raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
267
                                " network %s, but the target NIC does not." %
268
                                (ip, conf_net)),
269
                               errors.ECODE_STATE)
270

    
271
  return (None, None)
272

    
273

    
274
def _ComputeIPolicyInstanceSpecViolation(
275
  ipolicy, instance_spec, disk_template,
276
  _compute_fn=ComputeIPolicySpecViolation):
277
  """Compute if instance specs meets the specs of ipolicy.
278

279
  @type ipolicy: dict
280
  @param ipolicy: The ipolicy to verify against
281
  @param instance_spec: dict
282
  @param instance_spec: The instance spec to verify
283
  @type disk_template: string
284
  @param disk_template: the disk template of the instance
285
  @param _compute_fn: The function to verify ipolicy (unittest only)
286
  @see: L{ComputeIPolicySpecViolation}
287

288
  """
289
  mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
290
  cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
291
  disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
292
  disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
293
  nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
294
  spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)
295

    
296
  return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
297
                     disk_sizes, spindle_use, disk_template)
298

    
299

    
300
def _CheckOSVariant(os_obj, name):
301
  """Check whether an OS name conforms to the os variants specification.
302

303
  @type os_obj: L{objects.OS}
304
  @param os_obj: OS object to check
305
  @type name: string
306
  @param name: OS name passed by the user, to check for validity
307

308
  """
309
  variant = objects.OS.GetVariant(name)
310
  if not os_obj.supported_variants:
311
    if variant:
312
      raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
313
                                 " passed)" % (os_obj.name, variant),
314
                                 errors.ECODE_INVAL)
315
    return
316
  if not variant:
317
    raise errors.OpPrereqError("OS name must include a variant",
318
                               errors.ECODE_INVAL)
319

    
320
  if variant not in os_obj.supported_variants:
321
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)
322

    
323

    
324
class LUInstanceCreate(LogicalUnit):
325
  """Create an instance.
326

327
  """
328
  HPATH = "instance-add"
329
  HTYPE = constants.HTYPE_INSTANCE
330
  REQ_BGL = False
331

    
332
  def _CheckDiskTemplateValid(self):
333
    """Checks validity of disk template.
334

335
    """
336
    cluster = self.cfg.GetClusterInfo()
337
    if self.op.disk_template is None:
338
      # FIXME: It would be better to take the default disk template from the
339
      # ipolicy, but for the ipolicy we need the primary node, which we get from
340
      # the iallocator, which wants the disk template as input. To solve this
341
      # chicken-and-egg problem, it should be possible to specify just a node
342
      # group from the iallocator and take the ipolicy from that.
343
      self.op.disk_template = cluster.enabled_disk_templates[0]
344
    CheckDiskTemplateEnabled(cluster, self.op.disk_template)
345

    
346
  def _CheckDiskArguments(self):
347
    """Checks validity of disk-related arguments.
348

349
    """
350
    # check that disk's names are unique and valid
351
    utils.ValidateDeviceNames("disk", self.op.disks)
352

    
353
    self._CheckDiskTemplateValid()
354

    
355
    # check disks. parameter names and consistent adopt/no-adopt strategy
356
    has_adopt = has_no_adopt = False
357
    for disk in self.op.disks:
358
      if self.op.disk_template != constants.DT_EXT:
359
        utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
360
      if constants.IDISK_ADOPT in disk:
361
        has_adopt = True
362
      else:
363
        has_no_adopt = True
364
    if has_adopt and has_no_adopt:
365
      raise errors.OpPrereqError("Either all disks are adopted or none is",
366
                                 errors.ECODE_INVAL)
367
    if has_adopt:
368
      if self.op.disk_template not in constants.DTS_MAY_ADOPT:
369
        raise errors.OpPrereqError("Disk adoption is not supported for the"
370
                                   " '%s' disk template" %
371
                                   self.op.disk_template,
372
                                   errors.ECODE_INVAL)
373
      if self.op.iallocator is not None:
374
        raise errors.OpPrereqError("Disk adoption not allowed with an"
375
                                   " iallocator script", errors.ECODE_INVAL)
376
      if self.op.mode == constants.INSTANCE_IMPORT:
377
        raise errors.OpPrereqError("Disk adoption not allowed for"
378
                                   " instance import", errors.ECODE_INVAL)
379
    else:
380
      if self.op.disk_template in constants.DTS_MUST_ADOPT:
381
        raise errors.OpPrereqError("Disk template %s requires disk adoption,"
382
                                   " but no 'adopt' parameter given" %
383
                                   self.op.disk_template,
384
                                   errors.ECODE_INVAL)
385

    
386
    self.adopt_disks = has_adopt
387

    
388
  def _CheckVLANArguments(self):
389
    """ Check validity of VLANs if given
390

391
    """
392
    for nic in self.op.nics:
393
      vlan = nic.get(constants.INIC_VLAN, None)
394
      if vlan:
395
        if vlan[0] == ".":
396
          # vlan starting with dot means single untagged vlan,
397
          # might be followed by trunk (:)
398
          if not vlan[1:].isdigit():
399
            vlanlist = vlan[1:].split(':')
400
            for vl in vlanlist:
401
              if not vl.isdigit():
402
                raise errors.OpPrereqError("Specified VLAN parameter is "
403
                                           "invalid : %s" % vlan,
404
                                             errors.ECODE_INVAL)
405
        elif vlan[0] == ":":
406
          # Trunk - tagged only
407
          vlanlist = vlan[1:].split(':')
408
          for vl in vlanlist:
409
            if not vl.isdigit():
410
              raise errors.OpPrereqError("Specified VLAN parameter is invalid"
411
                                           " : %s" % vlan, errors.ECODE_INVAL)
412
        elif vlan.isdigit():
413
          # This is the simplest case. No dots, only single digit
414
          # -> Create untagged access port, dot needs to be added
415
          nic[constants.INIC_VLAN] = "." + vlan
416
        else:
417
          raise errors.OpPrereqError("Specified VLAN parameter is invalid"
418
                                       " : %s" % vlan, errors.ECODE_INVAL)
419

    
420
  def CheckArguments(self):
421
    """Check arguments.
422

423
    """
424
    # do not require name_check to ease forward/backward compatibility
425
    # for tools
426
    if self.op.no_install and self.op.start:
427
      self.LogInfo("No-installation mode selected, disabling startup")
428
      self.op.start = False
429
    # validate/normalize the instance name
430
    self.op.instance_name = \
431
      netutils.Hostname.GetNormalizedName(self.op.instance_name)
432

    
433
    if self.op.ip_check and not self.op.name_check:
434
      # TODO: make the ip check more flexible and not depend on the name check
435
      raise errors.OpPrereqError("Cannot do IP address check without a name"
436
                                 " check", errors.ECODE_INVAL)
437

    
438
    # check nics' parameter names
439
    for nic in self.op.nics:
440
      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
441
    # check that NIC's parameters names are unique and valid
442
    utils.ValidateDeviceNames("NIC", self.op.nics)
443

    
444
    self._CheckVLANArguments()
445

    
446
    self._CheckDiskArguments()
447
    assert self.op.disk_template is not None
448

    
449
    # instance name verification
450
    if self.op.name_check:
451
      self.hostname = _CheckHostnameSane(self, self.op.instance_name)
452
      self.op.instance_name = self.hostname.name
453
      # used in CheckPrereq for ip ping check
454
      self.check_ip = self.hostname.ip
455
    else:
456
      self.check_ip = None
457

    
458
    # file storage checks
459
    if (self.op.file_driver and
460
        not self.op.file_driver in constants.FILE_DRIVER):
461
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
462
                                 self.op.file_driver, errors.ECODE_INVAL)
463

    
464
    # set default file_driver if unset and required
465
    if (not self.op.file_driver and
466
        self.op.disk_template in constants.DTS_FILEBASED):
467
      self.op.file_driver = constants.FD_LOOP
468

    
469
    ### Node/iallocator related checks
470
    CheckIAllocatorOrNode(self, "iallocator", "pnode")
471

    
472
    if self.op.pnode is not None:
473
      if self.op.disk_template in constants.DTS_INT_MIRROR:
474
        if self.op.snode is None:
475
          raise errors.OpPrereqError("The networked disk templates need"
476
                                     " a mirror node", errors.ECODE_INVAL)
477
      elif self.op.snode:
478
        self.LogWarning("Secondary node will be ignored on non-mirrored disk"
479
                        " template")
480
        self.op.snode = None
481

    
482
    _CheckOpportunisticLocking(self.op)
483

    
484
    if self.op.mode == constants.INSTANCE_IMPORT:
485
      # On import force_variant must be True, because if we forced it at
486
      # initial install, our only chance when importing it back is that it
487
      # works again!
488
      self.op.force_variant = True
489

    
490
      if self.op.no_install:
491
        self.LogInfo("No-installation mode has no effect during import")
492

    
493
    elif self.op.mode == constants.INSTANCE_CREATE:
494
      if self.op.os_type is None:
495
        raise errors.OpPrereqError("No guest OS specified",
496
                                   errors.ECODE_INVAL)
497
      if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
498
        raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
499
                                   " installation" % self.op.os_type,
500
                                   errors.ECODE_STATE)
501
    elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
502
      self._cds = GetClusterDomainSecret()
503

    
504
      # Check handshake to ensure both clusters have the same domain secret
505
      src_handshake = self.op.source_handshake
506
      if not src_handshake:
507
        raise errors.OpPrereqError("Missing source handshake",
508
                                   errors.ECODE_INVAL)
509

    
510
      errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
511
                                                           src_handshake)
512
      if errmsg:
513
        raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
514
                                   errors.ECODE_INVAL)
515

    
516
      # Load and check source CA
517
      self.source_x509_ca_pem = self.op.source_x509_ca
518
      if not self.source_x509_ca_pem:
519
        raise errors.OpPrereqError("Missing source X509 CA",
520
                                   errors.ECODE_INVAL)
521

    
522
      try:
523
        (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
524
                                                    self._cds)
525
      except OpenSSL.crypto.Error, err:
526
        raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
527
                                   (err, ), errors.ECODE_INVAL)
528

    
529
      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
530
      if errcode is not None:
531
        raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
532
                                   errors.ECODE_INVAL)
533

    
534
      self.source_x509_ca = cert
535

    
536
      src_instance_name = self.op.source_instance_name
537
      if not src_instance_name:
538
        raise errors.OpPrereqError("Missing source instance name",
539
                                   errors.ECODE_INVAL)
540

    
541
      self.source_instance_name = \
542
        netutils.GetHostname(name=src_instance_name).name
543

    
544
    else:
545
      raise errors.OpPrereqError("Invalid instance creation mode %r" %
546
                                 self.op.mode, errors.ECODE_INVAL)
547

    
548
  def ExpandNames(self):
549
    """ExpandNames for CreateInstance.
550

551
    Figure out the right locks for instance creation.
552

553
    """
554
    self.needed_locks = {}
555

    
556
    # this is just a preventive check, but someone might still add this
557
    # instance in the meantime, and creation will fail at lock-add time
558
    if self.op.instance_name in\
559
      [inst.name for inst in self.cfg.GetAllInstancesInfo().values()]:
560
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
561
                                 self.op.instance_name, errors.ECODE_EXISTS)
562

    
563
    self.add_locks[locking.LEVEL_INSTANCE] = self.op.instance_name
564

    
565
    if self.op.iallocator:
566
      # TODO: Find a solution to not lock all nodes in the cluster, e.g. by
567
      # specifying a group on instance creation and then selecting nodes from
568
      # that group
569
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
570
      self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
571

    
572
      if self.op.opportunistic_locking:
573
        self.opportunistic_locks[locking.LEVEL_NODE] = True
574
        self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
575
    else:
576
      (self.op.pnode_uuid, self.op.pnode) = \
577
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
578
      nodelist = [self.op.pnode_uuid]
579
      if self.op.snode is not None:
580
        (self.op.snode_uuid, self.op.snode) = \
581
          ExpandNodeUuidAndName(self.cfg, self.op.snode_uuid, self.op.snode)
582
        nodelist.append(self.op.snode_uuid)
583
      self.needed_locks[locking.LEVEL_NODE] = nodelist
584

    
585
    # in case of import lock the source node too
586
    if self.op.mode == constants.INSTANCE_IMPORT:
587
      src_node = self.op.src_node
588
      src_path = self.op.src_path
589

    
590
      if src_path is None:
591
        self.op.src_path = src_path = self.op.instance_name
592

    
593
      if src_node is None:
594
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
595
        self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
596
        self.op.src_node = None
597
        if os.path.isabs(src_path):
598
          raise errors.OpPrereqError("Importing an instance from a path"
599
                                     " requires a source node option",
600
                                     errors.ECODE_INVAL)
601
      else:
602
        (self.op.src_node_uuid, self.op.src_node) = (_, src_node) = \
603
          ExpandNodeUuidAndName(self.cfg, self.op.src_node_uuid, src_node)
604
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
605
          self.needed_locks[locking.LEVEL_NODE].append(self.op.src_node_uuid)
606
        if not os.path.isabs(src_path):
607
          self.op.src_path = \
608
            utils.PathJoin(pathutils.EXPORT_DIR, src_path)
609

    
610
    self.needed_locks[locking.LEVEL_NODE_RES] = \
611
      CopyLockList(self.needed_locks[locking.LEVEL_NODE])
612

    
613
  def _RunAllocator(self):
614
    """Run the allocator based on input opcode.
615

616
    """
617
    if self.op.opportunistic_locking:
618
      # Only consider nodes for which a lock is held
619
      node_name_whitelist = self.cfg.GetNodeNames(
620
        self.owned_locks(locking.LEVEL_NODE))
621
    else:
622
      node_name_whitelist = None
623

    
624
    req = _CreateInstanceAllocRequest(self.op, self.disks,
625
                                      self.nics, self.be_full,
626
                                      node_name_whitelist)
627
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)
628

    
629
    ial.Run(self.op.iallocator)
630

    
631
    if not ial.success:
632
      # When opportunistic locks are used only a temporary failure is generated
633
      if self.op.opportunistic_locking:
634
        ecode = errors.ECODE_TEMP_NORES
635
      else:
636
        ecode = errors.ECODE_NORES
637

    
638
      raise errors.OpPrereqError("Can't compute nodes using"
639
                                 " iallocator '%s': %s" %
640
                                 (self.op.iallocator, ial.info),
641
                                 ecode)
642

    
643
    (self.op.pnode_uuid, self.op.pnode) = \
644
      ExpandNodeUuidAndName(self.cfg, None, ial.result[0])
645
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
646
                 self.op.instance_name, self.op.iallocator,
647
                 utils.CommaJoin(ial.result))
648

    
649
    assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator"
650

    
651
    if req.RequiredNodes() == 2:
652
      (self.op.snode_uuid, self.op.snode) = \
653
        ExpandNodeUuidAndName(self.cfg, None, ial.result[1])
654

    
655
  def BuildHooksEnv(self):
656
    """Build hooks env.
657

658
    This runs on master, primary and secondary nodes of the instance.
659

660
    """
661
    env = {
662
      "ADD_MODE": self.op.mode,
663
      }
664
    if self.op.mode == constants.INSTANCE_IMPORT:
665
      env["SRC_NODE"] = self.op.src_node
666
      env["SRC_PATH"] = self.op.src_path
667
      env["SRC_IMAGES"] = self.src_images
668

    
669
    env.update(BuildInstanceHookEnv(
670
      name=self.op.instance_name,
671
      primary_node_name=self.op.pnode,
672
      secondary_node_names=self.cfg.GetNodeNames(self.secondaries),
673
      status=self.op.start,
674
      os_type=self.op.os_type,
675
      minmem=self.be_full[constants.BE_MINMEM],
676
      maxmem=self.be_full[constants.BE_MAXMEM],
677
      vcpus=self.be_full[constants.BE_VCPUS],
678
      nics=NICListToTuple(self, self.nics),
679
      disk_template=self.op.disk_template,
680
      disks=[(d[constants.IDISK_NAME], d.get("uuid", ""),
681
              d[constants.IDISK_SIZE], d[constants.IDISK_MODE])
682
             for d in self.disks],
683
      bep=self.be_full,
684
      hvp=self.hv_full,
685
      hypervisor_name=self.op.hypervisor,
686
      tags=self.op.tags,
687
      ))
688

    
689
    return env
690

    
691
  def BuildHooksNodes(self):
692
    """Build hooks nodes.
693

694
    """
695
    nl = [self.cfg.GetMasterNode(), self.op.pnode_uuid] + self.secondaries
696
    return nl, nl
697

    
698
  def _ReadExportInfo(self):
699
    """Reads the export information from disk.
700

701
    It will override the opcode source node and path with the actual
702
    information, if these two were not specified before.
703

704
    @return: the export information
705

706
    """
707
    assert self.op.mode == constants.INSTANCE_IMPORT
708

    
709
    if self.op.src_node_uuid is None:
710
      locked_nodes = self.owned_locks(locking.LEVEL_NODE)
711
      exp_list = self.rpc.call_export_list(locked_nodes)
712
      found = False
713
      for node_uuid in exp_list:
714
        if exp_list[node_uuid].fail_msg:
715
          continue
716
        if self.op.src_path in exp_list[node_uuid].payload:
717
          found = True
718
          self.op.src_node = self.cfg.GetNodeInfo(node_uuid).name
719
          self.op.src_node_uuid = node_uuid
720
          self.op.src_path = utils.PathJoin(pathutils.EXPORT_DIR,
721
                                            self.op.src_path)
722
          break
723
      if not found:
724
        raise errors.OpPrereqError("No export found for relative path %s" %
725
                                   self.op.src_path, errors.ECODE_INVAL)
726

    
727
    CheckNodeOnline(self, self.op.src_node_uuid)
728
    result = self.rpc.call_export_info(self.op.src_node_uuid, self.op.src_path)
729
    result.Raise("No export or invalid export found in dir %s" %
730
                 self.op.src_path)
731

    
732
    export_info = objects.SerializableConfigParser.Loads(str(result.payload))
733
    if not export_info.has_section(constants.INISECT_EXP):
734
      raise errors.ProgrammerError("Corrupted export config",
735
                                   errors.ECODE_ENVIRON)
736

    
737
    ei_version = export_info.get(constants.INISECT_EXP, "version")
738
    if int(ei_version) != constants.EXPORT_VERSION:
739
      raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
740
                                 (ei_version, constants.EXPORT_VERSION),
741
                                 errors.ECODE_ENVIRON)
742
    return export_info
743

    
744
  def _ReadExportParams(self, einfo):
745
    """Use export parameters as defaults.
746

747
    In case the opcode doesn't specify (as in override) some instance
748
    parameters, then try to use them from the export information, if
749
    that declares them.
750

751
    """
752
    self.op.os_type = einfo.get(constants.INISECT_EXP, "os")
753

    
754
    if not self.op.disks:
755
      disks = []
756
      # TODO: import the disk iv_name too
757
      for idx in range(constants.MAX_DISKS):
758
        if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
759
          disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
760
          disks.append({constants.IDISK_SIZE: disk_sz})
761
      self.op.disks = disks
762
      if not disks and self.op.disk_template != constants.DT_DISKLESS:
763
        raise errors.OpPrereqError("No disk info specified and the export"
764
                                   " is missing the disk information",
765
                                   errors.ECODE_INVAL)
766

    
767
    if not self.op.nics:
768
      nics = []
769
      for idx in range(constants.MAX_NICS):
770
        if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
771
          ndict = {}
772
          for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
773
            nic_param_name = "nic%d_%s" % (idx, name)
774
            if einfo.has_option(constants.INISECT_INS, nic_param_name):
775
              v = einfo.get(constants.INISECT_INS, nic_param_name)
776
              ndict[name] = v
777
          nics.append(ndict)
778
        else:
779
          break
780
      self.op.nics = nics
781

    
782
    if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
783
      self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()
784

    
785
    if (self.op.hypervisor is None and
786
        einfo.has_option(constants.INISECT_INS, "hypervisor")):
787
      self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")
788

    
789
    if einfo.has_section(constants.INISECT_HYP):
790
      # use the export parameters but do not override the ones
791
      # specified by the user
792
      for name, value in einfo.items(constants.INISECT_HYP):
793
        if name not in self.op.hvparams:
794
          self.op.hvparams[name] = value
795

    
796
    if einfo.has_section(constants.INISECT_BEP):
797
      # use the parameters, without overriding
798
      for name, value in einfo.items(constants.INISECT_BEP):
799
        if name not in self.op.beparams:
800
          self.op.beparams[name] = value
801
        # Compatibility for the old "memory" be param
802
        if name == constants.BE_MEMORY:
803
          if constants.BE_MAXMEM not in self.op.beparams:
804
            self.op.beparams[constants.BE_MAXMEM] = value
805
          if constants.BE_MINMEM not in self.op.beparams:
806
            self.op.beparams[constants.BE_MINMEM] = value
807
    else:
808
      # try to read the parameters old style, from the main section
809
      for name in constants.BES_PARAMETERS:
810
        if (name not in self.op.beparams and
811
            einfo.has_option(constants.INISECT_INS, name)):
812
          self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)
813

    
814
    if einfo.has_section(constants.INISECT_OSP):
815
      # use the parameters, without overriding
816
      for name, value in einfo.items(constants.INISECT_OSP):
817
        if name not in self.op.osparams:
818
          self.op.osparams[name] = value
819

    
820
  def _RevertToDefaults(self, cluster):
821
    """Revert the instance parameters to the default values.
822

823
    """
824
    # hvparams
825
    hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
826
    for name in self.op.hvparams.keys():
827
      if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
828
        del self.op.hvparams[name]
829
    # beparams
830
    be_defs = cluster.SimpleFillBE({})
831
    for name in self.op.beparams.keys():
832
      if name in be_defs and be_defs[name] == self.op.beparams[name]:
833
        del self.op.beparams[name]
834
    # nic params
835
    nic_defs = cluster.SimpleFillNIC({})
836
    for nic in self.op.nics:
837
      for name in constants.NICS_PARAMETERS:
838
        if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
839
          del nic[name]
840
    # osparams
841
    os_defs = cluster.SimpleFillOS(self.op.os_type, {})
842
    for name in self.op.osparams.keys():
843
      if name in os_defs and os_defs[name] == self.op.osparams[name]:
844
        del self.op.osparams[name]
845

    
846
  def _CalculateFileStorageDir(self):
847
    """Calculate final instance file storage dir.
848

849
    """
850
    # file storage dir calculation/check
851
    self.instance_file_storage_dir = None
852
    if self.op.disk_template in constants.DTS_FILEBASED:
853
      # build the full file storage dir path
854
      joinargs = []
855

    
856
      if self.op.disk_template == constants.DT_SHARED_FILE:
857
        get_fsd_fn = self.cfg.GetSharedFileStorageDir
858
      else:
859
        get_fsd_fn = self.cfg.GetFileStorageDir
860

    
861
      cfg_storagedir = get_fsd_fn()
862
      if not cfg_storagedir:
863
        raise errors.OpPrereqError("Cluster file storage dir not defined",
864
                                   errors.ECODE_STATE)
865
      joinargs.append(cfg_storagedir)
866

    
867
      if self.op.file_storage_dir is not None:
868
        joinargs.append(self.op.file_storage_dir)
869

    
870
      joinargs.append(self.op.instance_name)
871

    
872
      # pylint: disable=W0142
873
      self.instance_file_storage_dir = utils.PathJoin(*joinargs)
874

    
875
  def CheckPrereq(self): # pylint: disable=R0914
876
    """Check prerequisites.
877

878
    """
879
    self._CalculateFileStorageDir()
880

    
881
    if self.op.mode == constants.INSTANCE_IMPORT:
882
      export_info = self._ReadExportInfo()
883
      self._ReadExportParams(export_info)
884
      self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
885
    else:
886
      self._old_instance_name = None
887

    
888
    if (not self.cfg.GetVGName() and
889
        self.op.disk_template not in constants.DTS_NOT_LVM):
890
      raise errors.OpPrereqError("Cluster does not support lvm-based"
891
                                 " instances", errors.ECODE_STATE)
892

    
893
    if (self.op.hypervisor is None or
894
        self.op.hypervisor == constants.VALUE_AUTO):
895
      self.op.hypervisor = self.cfg.GetHypervisorType()
896

    
897
    cluster = self.cfg.GetClusterInfo()
898
    enabled_hvs = cluster.enabled_hypervisors
899
    if self.op.hypervisor not in enabled_hvs:
900
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
901
                                 " cluster (%s)" %
902
                                 (self.op.hypervisor, ",".join(enabled_hvs)),
903
                                 errors.ECODE_STATE)
904

    
905
    # Check tag validity
906
    for tag in self.op.tags:
907
      objects.TaggableObject.ValidateTag(tag)
908

    
909
    # check hypervisor parameter syntax (locally)
910
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
911
    filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
912
                                      self.op.hvparams)
913
    hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
914
    hv_type.CheckParameterSyntax(filled_hvp)
915
    self.hv_full = filled_hvp
916
    # check that we don't specify global parameters on an instance
917
    CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
918
                         "instance", "cluster")
919

    
920
    # fill and remember the beparams dict
921
    self.be_full = _ComputeFullBeParams(self.op, cluster)
922

    
923
    # build os parameters
924
    self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)
925

    
926
    # now that hvp/bep are in final format, let's reset to defaults,
927
    # if told to do so
928
    if self.op.identify_defaults:
929
      self._RevertToDefaults(cluster)
930

    
931
    # NIC buildup
932
    self.nics = _ComputeNics(self.op, cluster, self.check_ip, self.cfg,
933
                             self.proc.GetECId())
934

    
935
    # disk checks/pre-build
936
    default_vg = self.cfg.GetVGName()
937
    self.disks = ComputeDisks(self.op, default_vg)
938

    
939
    if self.op.mode == constants.INSTANCE_IMPORT:
940
      disk_images = []
941
      for idx in range(len(self.disks)):
942
        option = "disk%d_dump" % idx
943
        if export_info.has_option(constants.INISECT_INS, option):
944
          # FIXME: are the old os-es, disk sizes, etc. useful?
945
          export_name = export_info.get(constants.INISECT_INS, option)
946
          image = utils.PathJoin(self.op.src_path, export_name)
947
          disk_images.append(image)
948
        else:
949
          disk_images.append(False)
950

    
951
      self.src_images = disk_images
952

    
953
      if self.op.instance_name == self._old_instance_name:
954
        for idx, nic in enumerate(self.nics):
955
          if nic.mac == constants.VALUE_AUTO:
956
            nic_mac_ini = "nic%d_mac" % idx
957
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)
958

    
959
    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT
960

    
961
    # ip ping checks (we use the same ip that was resolved in ExpandNames)
962
    if self.op.ip_check:
963
      if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
964
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
965
                                   (self.check_ip, self.op.instance_name),
966
                                   errors.ECODE_NOTUNIQUE)
967

    
968
    #### mac address generation
969
    # By generating here the mac address both the allocator and the hooks get
970
    # the real final mac address rather than the 'auto' or 'generate' value.
971
    # There is a race condition between the generation and the instance object
972
    # creation, which means that we know the mac is valid now, but we're not
973
    # sure it will be when we actually add the instance. If things go bad
974
    # adding the instance will abort because of a duplicate mac, and the
975
    # creation job will fail.
976
    for nic in self.nics:
977
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
978
        nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId())
979

    
980
    #### allocator run
981

    
982
    if self.op.iallocator is not None:
983
      self._RunAllocator()
984

    
985
    # Release all unneeded node locks
986
    keep_locks = filter(None, [self.op.pnode_uuid, self.op.snode_uuid,
987
                               self.op.src_node_uuid])
988
    ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
989
    ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
990
    ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)
991

    
992
    assert (self.owned_locks(locking.LEVEL_NODE) ==
993
            self.owned_locks(locking.LEVEL_NODE_RES)), \
994
      "Node locks differ from node resource locks"
995

    
996
    #### node related checks
997

    
998
    # check primary node
999
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode_uuid)
1000
    assert self.pnode is not None, \
1001
      "Cannot retrieve locked node %s" % self.op.pnode_uuid
1002
    if pnode.offline:
1003
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
1004
                                 pnode.name, errors.ECODE_STATE)
1005
    if pnode.drained:
1006
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
1007
                                 pnode.name, errors.ECODE_STATE)
1008
    if not pnode.vm_capable:
1009
      raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
1010
                                 " '%s'" % pnode.name, errors.ECODE_STATE)
1011

    
1012
    self.secondaries = []
1013

    
1014
    # Fill in any IPs from IP pools. This must happen here, because we need to
1015
    # know the nic's primary node, as specified by the iallocator
1016
    for idx, nic in enumerate(self.nics):
1017
      net_uuid = nic.network
1018
      if net_uuid is not None:
1019
        nobj = self.cfg.GetNetwork(net_uuid)
1020
        netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.uuid)
1021
        if netparams is None:
1022
          raise errors.OpPrereqError("No netparams found for network"
1023
                                     " %s. Probably not connected to"
1024
                                     " node's %s nodegroup" %
1025
                                     (nobj.name, self.pnode.name),
1026
                                     errors.ECODE_INVAL)
1027
        self.LogInfo("NIC/%d inherits netparams %s" %
1028
                     (idx, netparams.values()))
1029
        nic.nicparams = dict(netparams)
1030
        if nic.ip is not None:
1031
          if nic.ip.lower() == constants.NIC_IP_POOL:
1032
            try:
1033
              nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId())
1034
            except errors.ReservationError:
1035
              raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
1036
                                         " from the address pool" % idx,
1037
                                         errors.ECODE_STATE)
1038
            self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name)
1039
          else:
1040
            try:
1041
              self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId())
1042
            except errors.ReservationError:
1043
              raise errors.OpPrereqError("IP address %s already in use"
1044
                                         " or does not belong to network %s" %
1045
                                         (nic.ip, nobj.name),
1046
                                         errors.ECODE_NOTUNIQUE)
1047

    
1048
      # net is None, ip None or given
1049
      elif self.op.conflicts_check:
1050
        _CheckForConflictingIp(self, nic.ip, self.pnode.uuid)
1051

    
1052
    # mirror node verification
1053
    if self.op.disk_template in constants.DTS_INT_MIRROR:
1054
      if self.op.snode_uuid == pnode.uuid:
1055
        raise errors.OpPrereqError("The secondary node cannot be the"
1056
                                   " primary node", errors.ECODE_INVAL)
1057
      CheckNodeOnline(self, self.op.snode_uuid)
1058
      CheckNodeNotDrained(self, self.op.snode_uuid)
1059
      CheckNodeVmCapable(self, self.op.snode_uuid)
1060
      self.secondaries.append(self.op.snode_uuid)
1061

    
1062
      snode = self.cfg.GetNodeInfo(self.op.snode_uuid)
1063
      if pnode.group != snode.group:
1064
        self.LogWarning("The primary and secondary nodes are in two"
1065
                        " different node groups; the disk parameters"
1066
                        " from the first disk's node group will be"
1067
                        " used")
1068

    
1069
    nodes = [pnode]
1070
    if self.op.disk_template in constants.DTS_INT_MIRROR:
1071
      nodes.append(snode)
1072
    has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
1073
    excl_stor = compat.any(map(has_es, nodes))
1074
    if excl_stor and not self.op.disk_template in constants.DTS_EXCL_STORAGE:
1075
      raise errors.OpPrereqError("Disk template %s not supported with"
1076
                                 " exclusive storage" % self.op.disk_template,
1077
                                 errors.ECODE_STATE)
1078
    for disk in self.disks:
1079
      CheckSpindlesExclusiveStorage(disk, excl_stor, True)
1080

    
1081
    node_uuids = [pnode.uuid] + self.secondaries
1082

    
1083
    if not self.adopt_disks:
1084
      if self.op.disk_template == constants.DT_RBD:
1085
        # _CheckRADOSFreeSpace() is just a placeholder.
1086
        # Any function that checks prerequisites can be placed here.
1087
        # Check if there is enough space on the RADOS cluster.
1088
        CheckRADOSFreeSpace()
1089
      elif self.op.disk_template == constants.DT_EXT:
1090
        # FIXME: Function that checks prereqs if needed
1091
        pass
1092
      elif self.op.disk_template in constants.DTS_LVM:
1093
        # Check lv size requirements, if not adopting
1094
        req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
1095
        CheckNodesFreeDiskPerVG(self, node_uuids, req_sizes)
1096
      else:
1097
        # FIXME: add checks for other, non-adopting, non-lvm disk templates
1098
        pass
1099

    
1100
    elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
1101
      all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
1102
                                disk[constants.IDISK_ADOPT])
1103
                     for disk in self.disks])
1104
      if len(all_lvs) != len(self.disks):
1105
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
1106
                                   errors.ECODE_INVAL)
1107
      for lv_name in all_lvs:
1108
        try:
1109
          # FIXME: lv_name here is "vg/lv" need to ensure that other calls
1110
          # to ReserveLV uses the same syntax
1111
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
1112
        except errors.ReservationError:
1113
          raise errors.OpPrereqError("LV named %s used by another instance" %
1114
                                     lv_name, errors.ECODE_NOTUNIQUE)
1115

    
1116
      vg_names = self.rpc.call_vg_list([pnode.uuid])[pnode.uuid]
1117
      vg_names.Raise("Cannot get VG information from node %s" % pnode.name)
1118

    
1119
      node_lvs = self.rpc.call_lv_list([pnode.uuid],
1120
                                       vg_names.payload.keys())[pnode.uuid]
1121
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
1122
      node_lvs = node_lvs.payload
1123

    
1124
      delta = all_lvs.difference(node_lvs.keys())
1125
      if delta:
1126
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
1127
                                   utils.CommaJoin(delta),
1128
                                   errors.ECODE_INVAL)
1129
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
1130
      if online_lvs:
1131
        raise errors.OpPrereqError("Online logical volumes found, cannot"
1132
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
1133
                                   errors.ECODE_STATE)
1134
      # update the size of disk based on what is found
1135
      for dsk in self.disks:
1136
        dsk[constants.IDISK_SIZE] = \
1137
          int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
1138
                                        dsk[constants.IDISK_ADOPT])][0]))
1139

    
1140
    elif self.op.disk_template == constants.DT_BLOCK:
1141
      # Normalize and de-duplicate device paths
1142
      all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
1143
                       for disk in self.disks])
1144
      if len(all_disks) != len(self.disks):
1145
        raise errors.OpPrereqError("Duplicate disk names given for adoption",
1146
                                   errors.ECODE_INVAL)
1147
      baddisks = [d for d in all_disks
1148
                  if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
1149
      if baddisks:
1150
        raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
1151
                                   " cannot be adopted" %
1152
                                   (utils.CommaJoin(baddisks),
1153
                                    constants.ADOPTABLE_BLOCKDEV_ROOT),
1154
                                   errors.ECODE_INVAL)
1155

    
1156
      node_disks = self.rpc.call_bdev_sizes([pnode.uuid],
1157
                                            list(all_disks))[pnode.uuid]
1158
      node_disks.Raise("Cannot get block device information from node %s" %
1159
                       pnode.name)
1160
      node_disks = node_disks.payload
1161
      delta = all_disks.difference(node_disks.keys())
1162
      if delta:
1163
        raise errors.OpPrereqError("Missing block device(s): %s" %
1164
                                   utils.CommaJoin(delta),
1165
                                   errors.ECODE_INVAL)
1166
      for dsk in self.disks:
1167
        dsk[constants.IDISK_SIZE] = \
1168
          int(float(node_disks[dsk[constants.IDISK_ADOPT]]))
1169

    
1170
    # Check disk access param to be compatible with specified hypervisor
1171
    node_info = self.cfg.GetNodeInfo(self.op.pnode_uuid)
1172
    node_group = self.cfg.GetNodeGroup(node_info.group)
1173
    disk_params = self.cfg.GetGroupDiskParams(node_group)
1174
    access_type = disk_params[self.op.disk_template].get(
1175
      constants.RBD_ACCESS, constants.DISK_KERNELSPACE
1176
    )
1177

    
1178
    if not IsValidDiskAccessModeCombination(self.op.hypervisor,
1179
                                            self.op.disk_template,
1180
                                            access_type):
1181
      raise errors.OpPrereqError("Selected hypervisor (%s) cannot be"
1182
                                 " used with %s disk access param" %
1183
                                 (self.op.hypervisor, access_type),
1184
                                  errors.ECODE_STATE)
1185

    
1186
    # Verify instance specs
1187
    spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
1188
    ispec = {
1189
      constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
1190
      constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
1191
      constants.ISPEC_DISK_COUNT: len(self.disks),
1192
      constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE]
1193
                                  for disk in self.disks],
1194
      constants.ISPEC_NIC_COUNT: len(self.nics),
1195
      constants.ISPEC_SPINDLE_USE: spindle_use,
1196
      }
1197

    
1198
    group_info = self.cfg.GetNodeGroup(pnode.group)
1199
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
1200
    res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
1201
                                               self.op.disk_template)
1202
    if not self.op.ignore_ipolicy and res:
1203
      msg = ("Instance allocation to group %s (%s) violates policy: %s" %
1204
             (pnode.group, group_info.name, utils.CommaJoin(res)))
1205
      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
1206

    
1207
    CheckHVParams(self, node_uuids, self.op.hypervisor, self.op.hvparams)
1208

    
1209
    CheckNodeHasOS(self, pnode.uuid, self.op.os_type, self.op.force_variant)
1210
    # check OS parameters (remotely)
1211
    CheckOSParams(self, True, node_uuids, self.op.os_type, self.os_full)
1212

    
1213
    CheckNicsBridgesExist(self, self.nics, self.pnode.uuid)
1214

    
1215
    #TODO: _CheckExtParams (remotely)
1216
    # Check parameters for extstorage
1217

    
1218
    # memory check on primary node
1219
    #TODO(dynmem): use MINMEM for checking
1220
    if self.op.start:
1221
      hvfull = objects.FillDict(cluster.hvparams.get(self.op.hypervisor, {}),
1222
                                self.op.hvparams)
1223
      CheckNodeFreeMemory(self, self.pnode.uuid,
1224
                          "creating instance %s" % self.op.instance_name,
1225
                          self.be_full[constants.BE_MAXMEM],
1226
                          self.op.hypervisor, hvfull)
1227

    
1228
    self.dry_run_result = list(node_uuids)
1229

    
1230
  def Exec(self, feedback_fn):
1231
    """Create and add the instance to the cluster.
1232

1233
    """
1234
    assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
1235
                self.owned_locks(locking.LEVEL_NODE)), \
1236
      "Node locks differ from node resource locks"
1237
    assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)
1238

    
1239
    ht_kind = self.op.hypervisor
1240
    if ht_kind in constants.HTS_REQ_PORT:
1241
      network_port = self.cfg.AllocatePort()
1242
    else:
1243
      network_port = None
1244

    
1245
    instance_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
1246

    
1247
    # This is ugly but we got a chicken-egg problem here
1248
    # We can only take the group disk parameters, as the instance
1249
    # has no disks yet (we are generating them right here).
1250
    nodegroup = self.cfg.GetNodeGroup(self.pnode.group)
1251
    disks = GenerateDiskTemplate(self,
1252
                                 self.op.disk_template,
1253
                                 instance_uuid, self.pnode.uuid,
1254
                                 self.secondaries,
1255
                                 self.disks,
1256
                                 self.instance_file_storage_dir,
1257
                                 self.op.file_driver,
1258
                                 0,
1259
                                 feedback_fn,
1260
                                 self.cfg.GetGroupDiskParams(nodegroup))
1261

    
1262
    iobj = objects.Instance(name=self.op.instance_name,
1263
                            uuid=instance_uuid,
1264
                            os=self.op.os_type,
1265
                            primary_node=self.pnode.uuid,
1266
                            nics=self.nics, disks=disks,
1267
                            disk_template=self.op.disk_template,
1268
                            disks_active=False,
1269
                            admin_state=constants.ADMINST_DOWN,
1270
                            network_port=network_port,
1271
                            beparams=self.op.beparams,
1272
                            hvparams=self.op.hvparams,
1273
                            hypervisor=self.op.hypervisor,
1274
                            osparams=self.op.osparams,
1275
                            )
1276

    
1277
    if self.op.tags:
1278
      for tag in self.op.tags:
1279
        iobj.AddTag(tag)
1280

    
1281
    if self.adopt_disks:
1282
      if self.op.disk_template == constants.DT_PLAIN:
1283
        # rename LVs to the newly-generated names; we need to construct
1284
        # 'fake' LV disks with the old data, plus the new unique_id
1285
        tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
1286
        rename_to = []
1287
        for t_dsk, a_dsk in zip(tmp_disks, self.disks):
1288
          rename_to.append(t_dsk.logical_id)
1289
          t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
1290
        result = self.rpc.call_blockdev_rename(self.pnode.uuid,
1291
                                               zip(tmp_disks, rename_to))
1292
        result.Raise("Failed to rename adoped LVs")
1293
    else:
1294
      feedback_fn("* creating instance disks...")
1295
      try:
1296
        CreateDisks(self, iobj)
1297
      except errors.OpExecError:
1298
        self.LogWarning("Device creation failed")
1299
        self.cfg.ReleaseDRBDMinors(self.op.instance_name)
1300
        raise
1301

    
1302
    feedback_fn("adding instance %s to cluster config" % self.op.instance_name)
1303

    
1304
    self.cfg.AddInstance(iobj, self.proc.GetECId())
1305

    
1306
    # Declare that we don't want to remove the instance lock anymore, as we've
1307
    # added the instance to the config
1308
    del self.remove_locks[locking.LEVEL_INSTANCE]
1309

    
1310
    if self.op.mode == constants.INSTANCE_IMPORT:
1311
      # Release unused nodes
1312
      ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node_uuid])
1313
    else:
1314
      # Release all nodes
1315
      ReleaseLocks(self, locking.LEVEL_NODE)
1316

    
1317
    disk_abort = False
1318
    if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
1319
      feedback_fn("* wiping instance disks...")
1320
      try:
1321
        WipeDisks(self, iobj)
1322
      except errors.OpExecError, err:
1323
        logging.exception("Wiping disks failed")
1324
        self.LogWarning("Wiping instance disks failed (%s)", err)
1325
        disk_abort = True
1326

    
1327
    if disk_abort:
1328
      # Something is already wrong with the disks, don't do anything else
1329
      pass
1330
    elif self.op.wait_for_sync:
1331
      disk_abort = not WaitForSync(self, iobj)
1332
    elif iobj.disk_template in constants.DTS_INT_MIRROR:
1333
      # make sure the disks are not degraded (still sync-ing is ok)
1334
      feedback_fn("* checking mirrors status")
1335
      disk_abort = not WaitForSync(self, iobj, oneshot=True)
1336
    else:
1337
      disk_abort = False
1338

    
1339
    if disk_abort:
1340
      RemoveDisks(self, iobj)
1341
      self.cfg.RemoveInstance(iobj.uuid)
1342
      # Make sure the instance lock gets removed
1343
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
1344
      raise errors.OpExecError("There are some degraded disks for"
1345
                               " this instance")
1346

    
1347
    # instance disks are now active
1348
    iobj.disks_active = True
1349

    
1350
    # Release all node resource locks
1351
    ReleaseLocks(self, locking.LEVEL_NODE_RES)
1352

    
1353
    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
1354
      if self.op.mode == constants.INSTANCE_CREATE:
1355
        if not self.op.no_install:
1356
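          # For internally mirrored templates (e.g. DRBD) the initial resync
          # would compete with the OS installation I/O, so pause it while the
          # OS create scripts run and resume it afterwards.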
          pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
1357
                        not self.op.wait_for_sync)
1358
          if pause_sync:
1359
            feedback_fn("* pausing disk sync to install instance OS")
1360
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
1361
                                                              (iobj.disks,
1362
                                                               iobj), True)
1363
            for idx, success in enumerate(result.payload):
1364
              if not success:
1365
                logging.warn("pause-sync of instance %s for disk %d failed",
1366
                             self.op.instance_name, idx)
1367

    
1368
          feedback_fn("* running the instance OS create scripts...")
1369
          # FIXME: pass debug option from opcode to backend
1370
          os_add_result = \
1371
            self.rpc.call_instance_os_add(self.pnode.uuid, (iobj, None), False,
1372
                                          self.op.debug_level)
1373
          if pause_sync:
1374
            feedback_fn("* resuming disk sync")
1375
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
1376
                                                              (iobj.disks,
1377
                                                               iobj), False)
1378
            for idx, success in enumerate(result.payload):
1379
              if not success:
1380
                logging.warn("resume-sync of instance %s for disk %d failed",
1381
                             self.op.instance_name, idx)
1382

    
1383
          os_add_result.Raise("Could not add os for instance %s"
1384
                              " on node %s" % (self.op.instance_name,
1385
                                               self.pnode.name))
1386

    
1387
      else:
1388
        if self.op.mode == constants.INSTANCE_IMPORT:
1389
          feedback_fn("* running the instance OS import scripts...")
1390

    
1391
          transfers = []
1392

    
1393
          for idx, image in enumerate(self.src_images):
1394
            if not image:
1395
              continue
1396

    
1397
            # FIXME: pass debug option from opcode to backend
1398
            dt = masterd.instance.DiskTransfer("disk/%s" % idx,
1399
                                               constants.IEIO_FILE, (image, ),
1400
                                               constants.IEIO_SCRIPT,
1401
                                               ((iobj.disks[idx], iobj), idx),
1402
                                               None)
1403
            transfers.append(dt)
1404

    
1405
          import_result = \
1406
            masterd.instance.TransferInstanceData(self, feedback_fn,
1407
                                                  self.op.src_node_uuid,
1408
                                                  self.pnode.uuid,
1409
                                                  self.pnode.secondary_ip,
1410
                                                  self.op.compress,
1411
                                                  iobj, transfers)
1412
          if not compat.all(import_result):
1413
            self.LogWarning("Some disks for instance %s on node %s were not"
1414
                            " imported successfully" % (self.op.instance_name,
1415
                                                        self.pnode.name))
1416

    
1417
          rename_from = self._old_instance_name
1418

    
1419
        elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
1420
          feedback_fn("* preparing remote import...")
1421
          # The source cluster will stop the instance before attempting to make
1422
          # a connection. In some cases stopping an instance can take a long
1423
          # time, hence the shutdown timeout is added to the connection
1424
          # timeout.
1425
          connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
1426
                             self.op.source_shutdown_timeout)
1427
          timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
1428

    
1429
          assert iobj.primary_node == self.pnode.uuid
1430
          disk_results = \
1431
            masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
1432
                                          self.source_x509_ca,
1433
                                          self._cds, self.op.compress, timeouts)
1434
          if not compat.all(disk_results):
1435
            # TODO: Should the instance still be started, even if some disks
1436
            # failed to import (valid for local imports, too)?
1437
            self.LogWarning("Some disks for instance %s on node %s were not"
1438
                            " imported successfully" % (self.op.instance_name,
1439
                                                        self.pnode.name))
1440

    
1441
          rename_from = self.source_instance_name
1442

    
1443
        else:
1444
          # also checked in the prereq part
1445
          raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
1446
                                       % self.op.mode)
1447

    
1448
        # Run rename script on newly imported instance
1449
        assert iobj.name == self.op.instance_name
1450
        feedback_fn("Running rename script for %s" % self.op.instance_name)
1451
        result = self.rpc.call_instance_run_rename(self.pnode.uuid, iobj,
1452
                                                   rename_from,
1453
                                                   self.op.debug_level)
1454
        result.Warn("Failed to run rename script for %s on node %s" %
1455
                    (self.op.instance_name, self.pnode.name), self.LogWarning)
1456

    
1457
    assert not self.owned_locks(locking.LEVEL_NODE_RES)
1458

    
1459
    if self.op.start:
1460
      iobj.admin_state = constants.ADMINST_UP
1461
      self.cfg.Update(iobj, feedback_fn)
1462
      logging.info("Starting instance %s on node %s", self.op.instance_name,
1463
                   self.pnode.name)
1464
      feedback_fn("* starting instance...")
1465
      result = self.rpc.call_instance_start(self.pnode.uuid, (iobj, None, None),
1466
                                            False, self.op.reason)
1467
      result.Raise("Could not start instance")
1468

    
1469
    return list(iobj.all_nodes)
1470

    
1471

    
1472
class LUInstanceRename(LogicalUnit):
1473
  """Rename an instance.
1474

1475
  """
1476
  HPATH = "instance-rename"
1477
  HTYPE = constants.HTYPE_INSTANCE
1478

    
1479
  def CheckArguments(self):
1480
    """Check arguments.
1481

1482
    """
1483
    if self.op.ip_check and not self.op.name_check:
1484
      # TODO: make the ip check more flexible and not depend on the name check
1485
      raise errors.OpPrereqError("IP address check requires a name check",
1486
                                 errors.ECODE_INVAL)
1487

    
1488
  def BuildHooksEnv(self):
1489
    """Build hooks env.
1490

1491
    This runs on master, primary and secondary nodes of the instance.
1492

1493
    """
1494
    env = BuildInstanceHookEnvByObject(self, self.instance)
1495
    env["INSTANCE_NEW_NAME"] = self.op.new_name
1496
    return env
1497

    
1498
  def BuildHooksNodes(self):
1499
    """Build hooks nodes.
1500

1501
    """
1502
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
1503
    return (nl, nl)
1504

    
1505
  def CheckPrereq(self):
1506
    """Check prerequisites.
1507

1508
    This checks that the instance is in the cluster and is not running.
1509

1510
    """
1511
    (self.op.instance_uuid, self.op.instance_name) = \
1512
      ExpandInstanceUuidAndName(self.cfg, self.op.instance_uuid,
1513
                                self.op.instance_name)
1514
    instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1515
    assert instance is not None
1516

    
1517
    # An instance should never be running with a disabled disk template, but
1518
    # if it does happen, renaming a file-based instance would fail badly, so
1519
    # we check for that case up front.
1520
    if (instance.disk_template in constants.DTS_FILEBASED and
1521
        self.op.new_name != instance.name):
1522
      CheckDiskTemplateEnabled(self.cfg.GetClusterInfo(),
1523
                               instance.disk_template)
1524

    
1525
    CheckNodeOnline(self, instance.primary_node)
1526
    CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
1527
                       msg="cannot rename")
1528
    self.instance = instance
1529

    
1530
    new_name = self.op.new_name
1531
    if self.op.name_check:
1532
      hostname = _CheckHostnameSane(self, new_name)
1533
      new_name = self.op.new_name = hostname.name
1534
      if (self.op.ip_check and
1535
          netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
1536
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
1537
                                   (hostname.ip, new_name),
1538
                                   errors.ECODE_NOTUNIQUE)
1539

    
1540
    instance_names = [inst.name for
1541
                      inst in self.cfg.GetAllInstancesInfo().values()]
1542
    if new_name in instance_names and new_name != instance.name:
1543
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
1544
                                 new_name, errors.ECODE_EXISTS)
1545

    
1546
  def Exec(self, feedback_fn):
1547
    """Rename the instance.
1548

1549
    """
1550
    old_name = self.instance.name
1551

    
1552
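    # File-based disks live in a directory derived from the instance name, so
    # that directory has to be renamed along with the instance.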
    rename_file_storage = False
1553
    if (self.instance.disk_template in constants.DTS_FILEBASED and
1554
        self.op.new_name != self.instance.name):
1555
      old_file_storage_dir = os.path.dirname(
1556
                               self.instance.disks[0].logical_id[1])
1557
      rename_file_storage = True
1558

    
1559
    self.cfg.RenameInstance(self.instance.uuid, self.op.new_name)
1560
    # Change the instance lock. This is definitely safe while we hold the BGL.
1561
    # Otherwise the new lock would have to be added in acquired mode.
1562
    assert self.REQ_BGL
1563
    assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER)
1564
    self.glm.remove(locking.LEVEL_INSTANCE, old_name)
1565
    self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
1566

    
1567
    # re-read the instance from the configuration after rename
1568
    renamed_inst = self.cfg.GetInstanceInfo(self.instance.uuid)
1569

    
1570
    if rename_file_storage:
1571
      new_file_storage_dir = os.path.dirname(
1572
                               renamed_inst.disks[0].logical_id[1])
1573
      result = self.rpc.call_file_storage_dir_rename(renamed_inst.primary_node,
1574
                                                     old_file_storage_dir,
1575
                                                     new_file_storage_dir)
1576
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
1577
                   " (but the instance has been renamed in Ganeti)" %
1578
                   (self.cfg.GetNodeName(renamed_inst.primary_node),
1579
                    old_file_storage_dir, new_file_storage_dir))
1580

    
1581
    StartInstanceDisks(self, renamed_inst, None)
1582
    # update info on disks
1583
    info = GetInstanceInfoText(renamed_inst)
1584
    for (idx, disk) in enumerate(renamed_inst.disks):
1585
      for node_uuid in renamed_inst.all_nodes:
1586
        result = self.rpc.call_blockdev_setinfo(node_uuid,
1587
                                                (disk, renamed_inst), info)
1588
        result.Warn("Error setting info on node %s for disk %s" %
1589
                    (self.cfg.GetNodeName(node_uuid), idx), self.LogWarning)
1590
    try:
1591
      result = self.rpc.call_instance_run_rename(renamed_inst.primary_node,
1592
                                                 renamed_inst, old_name,
1593
                                                 self.op.debug_level)
1594
      result.Warn("Could not run OS rename script for instance %s on node %s"
1595
                  " (but the instance has been renamed in Ganeti)" %
1596
                  (renamed_inst.name,
1597
                   self.cfg.GetNodeName(renamed_inst.primary_node)),
1598
                  self.LogWarning)
1599
    finally:
1600
      ShutdownInstanceDisks(self, renamed_inst)
1601

    
1602
    return renamed_inst.name
1603

    
1604

    
1605
class LUInstanceRemove(LogicalUnit):
1606
  """Remove an instance.
1607

1608
  """
1609
  HPATH = "instance-remove"
1610
  HTYPE = constants.HTYPE_INSTANCE
1611
  REQ_BGL = False
1612

    
1613
  def ExpandNames(self):
1614
    self._ExpandAndLockInstance()
1615
    self.needed_locks[locking.LEVEL_NODE] = []
1616
    self.needed_locks[locking.LEVEL_NODE_RES] = []
1617
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
1618

    
1619
  def DeclareLocks(self, level):
1620
    if level == locking.LEVEL_NODE:
1621
      self._LockInstancesNodes()
1622
    elif level == locking.LEVEL_NODE_RES:
1623
      # Copy node locks
1624
      self.needed_locks[locking.LEVEL_NODE_RES] = \
1625
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1626

    
1627
  def BuildHooksEnv(self):
1628
    """Build hooks env.
1629

1630
    This runs on master, primary and secondary nodes of the instance.
1631

1632
    """
1633
    env = BuildInstanceHookEnvByObject(self, self.instance)
1634
    env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
1635
    return env
1636

    
1637
  def BuildHooksNodes(self):
1638
    """Build hooks nodes.
1639

1640
    """
1641
    nl = [self.cfg.GetMasterNode()]
1642
    nl_post = list(self.instance.all_nodes) + nl
1643
    return (nl, nl_post)
1644

    
1645
  def CheckPrereq(self):
1646
    """Check prerequisites.
1647

1648
    This checks that the instance is in the cluster.
1649

1650
    """
1651
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1652
    assert self.instance is not None, \
1653
      "Cannot retrieve locked instance %s" % self.op.instance_name
1654

    
1655
  def Exec(self, feedback_fn):
1656
    """Remove the instance.
1657

1658
    """
1659
    logging.info("Shutting down instance %s on node %s", self.instance.name,
1660
                 self.cfg.GetNodeName(self.instance.primary_node))
1661

    
1662
    result = self.rpc.call_instance_shutdown(self.instance.primary_node,
1663
                                             self.instance,
1664
                                             self.op.shutdown_timeout,
1665
                                             self.op.reason)
1666
    if self.op.ignore_failures:
1667
      result.Warn("Warning: can't shutdown instance", feedback_fn)
1668
    else:
1669
      result.Raise("Could not shutdown instance %s on node %s" %
1670
                   (self.instance.name,
1671
                    self.cfg.GetNodeName(self.instance.primary_node)))
1672

    
1673
    assert (self.owned_locks(locking.LEVEL_NODE) ==
1674
            self.owned_locks(locking.LEVEL_NODE_RES))
1675
    assert not (set(self.instance.all_nodes) -
1676
                self.owned_locks(locking.LEVEL_NODE)), \
1677
      "Not owning correct locks"
1678

    
1679
    RemoveInstance(self, feedback_fn, self.instance, self.op.ignore_failures)
1680

    
1681

    
1682
class LUInstanceMove(LogicalUnit):
1683
  """Move an instance by data-copying.
1684

1685
  """
1686
  HPATH = "instance-move"
1687
  HTYPE = constants.HTYPE_INSTANCE
1688
  REQ_BGL = False
1689

    
1690
  def ExpandNames(self):
1691
    self._ExpandAndLockInstance()
1692
    (self.op.target_node_uuid, self.op.target_node) = \
1693
      ExpandNodeUuidAndName(self.cfg, self.op.target_node_uuid,
1694
                            self.op.target_node)
1695
    self.needed_locks[locking.LEVEL_NODE] = [self.op.target_node_uuid]
1696
    self.needed_locks[locking.LEVEL_NODE_RES] = []
1697
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
1698

    
1699
  def DeclareLocks(self, level):
1700
    if level == locking.LEVEL_NODE:
1701
      self._LockInstancesNodes(primary_only=True)
1702
    elif level == locking.LEVEL_NODE_RES:
1703
      # Copy node locks
1704
      self.needed_locks[locking.LEVEL_NODE_RES] = \
1705
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1706

    
1707
  def BuildHooksEnv(self):
1708
    """Build hooks env.
1709

1710
    This runs on master, primary and target nodes of the instance.
1711

1712
    """
1713
    env = {
1714
      "TARGET_NODE": self.op.target_node,
1715
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
1716
      }
1717
    env.update(BuildInstanceHookEnvByObject(self, self.instance))
1718
    return env
1719

    
1720
  def BuildHooksNodes(self):
1721
    """Build hooks nodes.
1722

1723
    """
1724
    nl = [
1725
      self.cfg.GetMasterNode(),
1726
      self.instance.primary_node,
1727
      self.op.target_node_uuid,
1728
      ]
1729
    return (nl, nl)
1730

    
1731
  def CheckPrereq(self):
1732
    """Check prerequisites.
1733

1734
    This checks that the instance is in the cluster.
1735

1736
    """
1737
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1738
    assert self.instance is not None, \
1739
      "Cannot retrieve locked instance %s" % self.op.instance_name
1740

    
1741
    if self.instance.disk_template not in constants.DTS_COPYABLE:
1742
      raise errors.OpPrereqError("Disk template %s not suitable for copying" %
1743
                                 self.instance.disk_template,
1744
                                 errors.ECODE_STATE)
1745

    
1746
    target_node = self.cfg.GetNodeInfo(self.op.target_node_uuid)
1747
    assert target_node is not None, \
1748
      "Cannot retrieve locked node %s" % self.op.target_node
1749

    
1750
    self.target_node_uuid = target_node.uuid
1751
    if target_node.uuid == self.instance.primary_node:
1752
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
1753
                                 (self.instance.name, target_node.name),
1754
                                 errors.ECODE_STATE)
1755

    
1756
    cluster = self.cfg.GetClusterInfo()
1757
    bep = cluster.FillBE(self.instance)
1758

    
1759
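    # Only simple single-node disk layouts (plain LVM, file, shared file) can
    # be moved with a raw data copy; anything more complex is rejected.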
    for idx, dsk in enumerate(self.instance.disks):
1760
      if dsk.dev_type not in (constants.DT_PLAIN, constants.DT_FILE,
1761
                              constants.DT_SHARED_FILE):
1762
        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
1763
                                   " cannot copy" % idx, errors.ECODE_STATE)
1764

    
1765
    CheckNodeOnline(self, target_node.uuid)
1766
    CheckNodeNotDrained(self, target_node.uuid)
1767
    CheckNodeVmCapable(self, target_node.uuid)
1768
    group_info = self.cfg.GetNodeGroup(target_node.group)
1769
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
1770
    CheckTargetNodeIPolicy(self, ipolicy, self.instance, target_node, self.cfg,
1771
                           ignore=self.op.ignore_ipolicy)
1772

    
1773
    if self.instance.admin_state == constants.ADMINST_UP:
1774
      # check memory requirements on the target node
1775
      CheckNodeFreeMemory(
1776
          self, target_node.uuid, "failing over instance %s" %
1777
          self.instance.name, bep[constants.BE_MAXMEM],
1778
          self.instance.hypervisor,
1779
          cluster.hvparams[self.instance.hypervisor])
1780
    else:
1781
      self.LogInfo("Not checking memory on the secondary node as"
1782
                   " instance will not be started")
1783

    
1784
    # check bridge existence
1785
    CheckInstanceBridgesExist(self, self.instance, node_uuid=target_node.uuid)
1786

    
1787
  def Exec(self, feedback_fn):
1788
    """Move an instance.
1789

1790
    The move is done by shutting it down on its present node, copying
1791
    the data over (slow) and starting it on the new node.
1792

1793
    """
1794
    source_node = self.cfg.GetNodeInfo(self.instance.primary_node)
1795
    target_node = self.cfg.GetNodeInfo(self.target_node_uuid)
1796

    
1797
    self.LogInfo("Shutting down instance %s on source node %s",
1798
                 self.instance.name, source_node.name)
1799

    
1800
    assert (self.owned_locks(locking.LEVEL_NODE) ==
1801
            self.owned_locks(locking.LEVEL_NODE_RES))
1802

    
1803
    result = self.rpc.call_instance_shutdown(source_node.uuid, self.instance,
1804
                                             self.op.shutdown_timeout,
1805
                                             self.op.reason)
1806
    if self.op.ignore_consistency:
1807
      result.Warn("Could not shutdown instance %s on node %s. Proceeding"
1808
                  " anyway. Please make sure node %s is down. Error details" %
1809
                  (self.instance.name, source_node.name, source_node.name),
1810
                  self.LogWarning)
1811
    else:
1812
      result.Raise("Could not shutdown instance %s on node %s" %
1813
                   (self.instance.name, source_node.name))
1814

    
1815
    # create the target disks
1816
    try:
1817
      CreateDisks(self, self.instance, target_node_uuid=target_node.uuid)
1818
    except errors.OpExecError:
1819
      self.LogWarning("Device creation failed")
1820
      self.cfg.ReleaseDRBDMinors(self.instance.uuid)
1821
      raise
1822

    
1823
    errs = []
1824
    transfers = []
1825
    # activate, get path, create transfer jobs
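    # One raw-disk transfer job is created per disk; the data is streamed from
    # the source node to the target node's secondary IP.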
1826
    for idx, disk in enumerate(self.instance.disks):
1827
      # FIXME: pass debug option from opcode to backend
1828
      dt = masterd.instance.DiskTransfer("disk/%s" % idx,
1829
                                         constants.IEIO_RAW_DISK,
1830
                                         (disk, self.instance),
1831
                                         constants.IEIO_RAW_DISK,
1832
                                         (disk, self.instance),
1833
                                         None)
1834
      transfers.append(dt)
1835

    
1836
    import_result = \
1837
      masterd.instance.TransferInstanceData(self, feedback_fn,
1838
                                            source_node.uuid,
1839
                                            target_node.uuid,
1840
                                            target_node.secondary_ip,
1841
                                            self.op.compress,
1842
                                            self.instance, transfers)
1843
    if not compat.all(import_result):
1844
      errs.append("Failed to transfer instance data")
1845

    
1846
    if errs:
1847
      self.LogWarning("Some disks failed to copy, aborting")
1848
      try:
1849
        RemoveDisks(self, self.instance, target_node_uuid=target_node.uuid)
1850
      finally:
1851
        self.cfg.ReleaseDRBDMinors(self.instance.uuid)
1852
        raise errors.OpExecError("Errors during disk copy: %s" %
1853
                                 (",".join(errs),))
1854

    
1855
    self.instance.primary_node = target_node.uuid
1856
    self.cfg.Update(self.instance, feedback_fn)
1857

    
1858
    self.LogInfo("Removing the disks on the original node")
1859
    RemoveDisks(self, self.instance, target_node_uuid=source_node.uuid)
1860

    
1861
    # Only start the instance if it's marked as up
1862
    if self.instance.admin_state == constants.ADMINST_UP:
1863
      self.LogInfo("Starting instance %s on node %s",
1864
                   self.instance.name, target_node.name)
1865

    
1866
      disks_ok, _ = AssembleInstanceDisks(self, self.instance,
1867
                                          ignore_secondaries=True)
1868
      if not disks_ok:
1869
        ShutdownInstanceDisks(self, self.instance)
1870
        raise errors.OpExecError("Can't activate the instance's disks")
1871

    
1872
      result = self.rpc.call_instance_start(target_node.uuid,
1873
                                            (self.instance, None, None), False,
1874
                                            self.op.reason)
1875
      msg = result.fail_msg
1876
      if msg:
1877
        ShutdownInstanceDisks(self, self.instance)
1878
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
1879
                                 (self.instance.name, target_node.name, msg))
1880

    
1881

    
1882
class LUInstanceMultiAlloc(NoHooksLU):
1883
  """Allocates multiple instances at the same time.
1884

1885
  """
1886
  REQ_BGL = False
1887

    
1888
  def CheckArguments(self):
1889
    """Check arguments.
1890

1891
    """
1892
    nodes = []
1893
    for inst in self.op.instances:
1894
      if inst.iallocator is not None:
1895
        raise errors.OpPrereqError("iallocator are not allowed to be set on"
1896
                                   " instance objects", errors.ECODE_INVAL)
1897
      nodes.append(bool(inst.pnode))
1898
      if inst.disk_template in constants.DTS_INT_MIRROR:
1899
        nodes.append(bool(inst.snode))
1900

    
1901
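    # Either every instance specifies its nodes explicitly or none does;
    # mixing the two styles is rejected below.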
    has_nodes = compat.any(nodes)
1902
    if compat.all(nodes) ^ has_nodes:
1903
      raise errors.OpPrereqError("There are instance objects providing"
1904
                                 " pnode/snode while others do not",
1905
                                 errors.ECODE_INVAL)
1906

    
1907
    if not has_nodes and self.op.iallocator is None:
1908
      default_iallocator = self.cfg.GetDefaultIAllocator()
1909
      if default_iallocator:
1910
        self.op.iallocator = default_iallocator
1911
      else:
1912
        raise errors.OpPrereqError("No iallocator or nodes on the instances"
1913
                                   " given and no cluster-wide default"
1914
                                   " iallocator found; please specify either"
1915
                                   " an iallocator or nodes on the instances"
1916
                                   " or set a cluster-wide default iallocator",
1917
                                   errors.ECODE_INVAL)
1918

    
1919
    _CheckOpportunisticLocking(self.op)
1920

    
1921
    dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
1922
    if dups:
1923
      raise errors.OpPrereqError("There are duplicate instance names: %s" %
1924
                                 utils.CommaJoin(dups), errors.ECODE_INVAL)
1925

    
1926
  def ExpandNames(self):
1927
    """Calculate the locks.
1928

1929
    """
1930
    self.share_locks = ShareAll()
1931
    self.needed_locks = {
1932
      # iallocator will select nodes and even if no iallocator is used,
1933
      # collisions with LUInstanceCreate should be avoided
1934
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
1935
      }
1936

    
1937
    if self.op.iallocator:
1938
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
1939
      self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET
1940

    
1941
      if self.op.opportunistic_locking:
1942
        self.opportunistic_locks[locking.LEVEL_NODE] = True
1943
        self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
1944
    else:
1945
      nodeslist = []
1946
      for inst in self.op.instances:
1947
        (inst.pnode_uuid, inst.pnode) = \
1948
          ExpandNodeUuidAndName(self.cfg, inst.pnode_uuid, inst.pnode)
1949
        nodeslist.append(inst.pnode_uuid)
1950
        if inst.snode is not None:
1951
          (inst.snode_uuid, inst.snode) = \
1952
            ExpandNodeUuidAndName(self.cfg, inst.snode_uuid, inst.snode)
1953
          nodeslist.append(inst.snode_uuid)
1954

    
1955
      self.needed_locks[locking.LEVEL_NODE] = nodeslist
1956
      # Lock resources of instance's primary and secondary nodes (copy to
1957
      # prevent accidental modification)
1958
      self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)
1959

    
1960
  def CheckPrereq(self):
1961
    """Check prerequisite.
1962

1963
    """
1964
    if self.op.iallocator:
1965
      cluster = self.cfg.GetClusterInfo()
1966
      default_vg = self.cfg.GetVGName()
1967
      ec_id = self.proc.GetECId()
1968

    
1969
      if self.op.opportunistic_locking:
1970
        # Only consider nodes for which a lock is held
1971
        node_whitelist = self.cfg.GetNodeNames(
1972
                           list(self.owned_locks(locking.LEVEL_NODE)))
1973
      else:
1974
        node_whitelist = None
1975

    
1976
      insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
1977
                                           _ComputeNics(op, cluster, None,
1978
                                                        self.cfg, ec_id),
1979
                                           _ComputeFullBeParams(op, cluster),
1980
                                           node_whitelist)
1981
               for op in self.op.instances]
1982

    
1983
      req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
1984
      ial = iallocator.IAllocator(self.cfg, self.rpc, req)
1985

    
1986
      ial.Run(self.op.iallocator)
1987

    
1988
      if not ial.success:
1989
        raise errors.OpPrereqError("Can't compute nodes using"
1990
                                   " iallocator '%s': %s" %
1991
                                   (self.op.iallocator, ial.info),
1992
                                   errors.ECODE_NORES)
1993

    
1994
      self.ia_result = ial.result
1995

    
1996
    if self.op.dry_run:
1997
      self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
1998
        constants.JOB_IDS_KEY: [],
1999
        })
2000

    
2001
  def _ConstructPartialResult(self):
2002
    """Contructs the partial result.
2003

2004
    """
2005
    if self.op.iallocator:
2006
      (allocatable, failed_insts) = self.ia_result
2007
      allocatable_insts = map(compat.fst, allocatable)
2008
    else:
2009
      allocatable_insts = [op.instance_name for op in self.op.instances]
2010
      failed_insts = []
2011

    
2012
    return {
2013
      constants.ALLOCATABLE_KEY: allocatable_insts,
2014
      constants.FAILED_KEY: failed_insts,
2015
      }
2016

    
2017
  def Exec(self, feedback_fn):
2018
    """Executes the opcode.
2019

2020
    """
2021
    jobs = []
2022
    if self.op.iallocator:
2023
      op2inst = dict((op.instance_name, op) for op in self.op.instances)
2024
      (allocatable, failed) = self.ia_result
2025

    
2026
      for (name, node_names) in allocatable:
2027
        op = op2inst.pop(name)
2028

    
2029
        (op.pnode_uuid, op.pnode) = \
2030
          ExpandNodeUuidAndName(self.cfg, None, node_names[0])
2031
        if len(node_names) > 1:
2032
          (op.snode_uuid, op.snode) = \
2033
            ExpandNodeUuidAndName(self.cfg, None, node_names[1])
2034

    
2035
        jobs.append([op])
2036

    
2037
      missing = set(op2inst.keys()) - set(failed)
2038
      assert not missing, \
2039
        "Iallocator did return incomplete result: %s" % \
2040
        utils.CommaJoin(missing)
2041
    else:
2042
      jobs.extend([op] for op in self.op.instances)
2043

    
2044
    return ResultWithJobs(jobs, **self._ConstructPartialResult())
2045

    
2046

    
2047
class _InstNicModPrivate:
2048
  """Data structure for network interface modifications.
2049

2050
  Used by L{LUInstanceSetParams}.
2051

2052
  """
2053
  def __init__(self):
2054
    self.params = None
2055
    self.filled = None
2056

    
2057

    
2058
def _PrepareContainerMods(mods, private_fn):
2059
  """Prepares a list of container modifications by adding a private data field.
2060

2061
  @type mods: list of tuples; (operation, index, parameters)
2062
  @param mods: List of modifications
2063
  @type private_fn: callable or None
2064
  @param private_fn: Callable for constructing a private data field for a
2065
    modification
2066
  @rtype: list
2067

2068
  """
2069
  if private_fn is None:
2070
    fn = lambda: None
2071
  else:
2072
    fn = private_fn
2073

    
2074
  return [(op, idx, params, fn()) for (op, idx, params) in mods]
2075

    
2076

    
2077
def _CheckNodesPhysicalCPUs(lu, node_uuids, requested, hypervisor_specs):
2078
  """Checks if nodes have enough physical CPUs
2079

2080
  This function checks if all given nodes have the needed number of
2081
  physical CPUs. In case any node has less CPUs or we cannot get the
2082
  information from the node, this function raises an OpPrereqError
2083
  exception.
2084

2085
  @type lu: C{LogicalUnit}
2086
  @param lu: a logical unit from which we get configuration data
2087
  @type node_uuids: C{list}
2088
  @param node_uuids: the list of node UUIDs to check
2089
  @type requested: C{int}
2090
  @param requested: the minimum acceptable number of physical CPUs
2091
  @type hypervisor_specs: list of pairs (string, dict of strings)
2092
  @param hypervisor_specs: list of hypervisor specifications in
2093
      pairs (hypervisor_name, hvparams)
2094
  @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
2095
      or we cannot check the node
2096

2097
  """
2098
  nodeinfo = lu.rpc.call_node_info(node_uuids, None, hypervisor_specs)
2099
  for node_uuid in node_uuids:
2100
    info = nodeinfo[node_uuid]
2101
    node_name = lu.cfg.GetNodeName(node_uuid)
2102
    info.Raise("Cannot get current information from node %s" % node_name,
2103
               prereq=True, ecode=errors.ECODE_ENVIRON)
2104
    (_, _, (hv_info, )) = info.payload
2105
    num_cpus = hv_info.get("cpu_total", None)
2106
    if not isinstance(num_cpus, int):
2107
      raise errors.OpPrereqError("Can't compute the number of physical CPUs"
2108
                                 " on node %s, result was '%s'" %
2109
                                 (node_name, num_cpus), errors.ECODE_ENVIRON)
2110
    if requested > num_cpus:
2111
      raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
2112
                                 "required" % (node_name, num_cpus, requested),
2113
                                 errors.ECODE_NORES)
2114

    
2115

    
2116
def GetItemFromContainer(identifier, kind, container):
2117
  """Return the item refered by the identifier.
2118

2119
  @type identifier: string
2120
  @param identifier: Item index or name or UUID
2121
  @type kind: string
2122
  @param kind: One-word item description
2123
  @type container: list
2124
  @param container: Container to get the item from
2125

2126
  """
2127
  # Index
2128
  try:
2129
    idx = int(identifier)
2130
    if idx == -1:
2131
      # Append
2132
      absidx = len(container) - 1
2133
    elif idx < 0:
2134
      raise IndexError("Not accepting negative indices other than -1")
2135
    elif idx > len(container):
2136
      raise IndexError("Got %s index %s, but there are only %s" %
2137
                       (kind, idx, len(container)))
2138
    else:
2139
      absidx = idx
2140
    return (absidx, container[idx])
2141
  except ValueError:
2142
    pass
2143

    
2144
  for idx, item in enumerate(container):
2145
    if item.uuid == identifier or item.name == identifier:
2146
      return (idx, item)
2147

    
2148
  raise errors.OpPrereqError("Cannot find %s with identifier %s" %
2149
                             (kind, identifier), errors.ECODE_NOENT)
2150

    
2151

    
2152
def _ApplyContainerMods(kind, container, chgdesc, mods,
2153
                        create_fn, modify_fn, remove_fn,
2154
                        post_add_fn=None):
2155
  """Applies descriptions in C{mods} to C{container}.
2156

2157
  @type kind: string
2158
  @param kind: One-word item description
2159
  @type container: list
2160
  @param container: Container to modify
2161
  @type chgdesc: None or list
2162
  @param chgdesc: List of applied changes
2163
  @type mods: list
2164
  @param mods: Modifications as returned by L{_PrepareContainerMods}
2165
  @type create_fn: callable
2166
  @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
2167
    receives absolute item index, parameters and private data object as added
2168
    by L{_PrepareContainerMods}, returns tuple containing new item and changes
2169
    as list
2170
  @type modify_fn: callable
2171
  @param modify_fn: Callback for modifying an existing item
2172
    (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
2173
    and private data object as added by L{_PrepareContainerMods}, returns
2174
    changes as list
2175
  @type remove_fn: callable
2176
  @param remove_fn: Callback on removing item; receives absolute item index,
2177
    item and private data object as added by L{_PrepareContainerMods}
2178
  @type post_add_fn: callable
2179
  @param post_add_fn: Callable for post-processing a newly created item after
2180
    it has been put into the container. It receives the index of the new item
2181
    and the new item as parameters.
2182

2183
  """
2184
  for (op, identifier, params, private) in mods:
2185
    changes = None
2186

    
2187
    if op == constants.DDM_ADD:
2188
      # Calculate where item will be added
2189
      # When adding an item, identifier can only be an index
2190
      try:
2191
        idx = int(identifier)
2192
      except ValueError:
2193
        raise errors.OpPrereqError("Only possitive integer or -1 is accepted as"
2194
                                   " identifier for %s" % constants.DDM_ADD,
2195
                                   errors.ECODE_INVAL)
2196
      if idx == -1:
2197
        addidx = len(container)
2198
      else:
2199
        if idx < 0:
2200
          raise IndexError("Not accepting negative indices other than -1")
2201
        elif idx > len(container):
2202
          raise IndexError("Got %s index %s, but there are only %s" %
2203
                           (kind, idx, len(container)))
2204
        addidx = idx
2205

    
2206
      if create_fn is None:
2207
        item = params
2208
      else:
2209
        (item, changes) = create_fn(addidx, params, private)
2210

    
2211
      if idx == -1:
2212
        container.append(item)
2213
      else:
2214
        assert idx >= 0
2215
        assert idx <= len(container)
2216
        # list.insert does so before the specified index
2217
        container.insert(idx, item)
2218

    
2219
      if post_add_fn is not None:
2220
        post_add_fn(addidx, item)
2221

    
2222
    else:
2223
      # Retrieve existing item
2224
      (absidx, item) = GetItemFromContainer(identifier, kind, container)
2225

    
2226
      if op == constants.DDM_REMOVE:
2227
        assert not params
2228

    
2229
        changes = [("%s/%s" % (kind, absidx), "remove")]
2230

    
2231
        if remove_fn is not None:
2232
          msg = remove_fn(absidx, item, private)
2233
          if msg:
2234
            changes.append(("%s/%s" % (kind, absidx), msg))
2235

    
2236
        assert container[absidx] == item
2237
        del container[absidx]
2238
      elif op == constants.DDM_MODIFY:
2239
        if modify_fn is not None:
2240
          changes = modify_fn(absidx, item, params, private)
2241
      else:
2242
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2243

    
2244
    assert _TApplyContModsCbChanges(changes)
2245

    
2246
    if not (chgdesc is None or changes is None):
2247
      chgdesc.extend(changes)
2248

    
2249

    
2250
def _UpdateIvNames(base_index, disks):
2251
  """Updates the C{iv_name} attribute of disks.
2252

2253
  @type disks: list of L{objects.Disk}
2254

2255
  """
2256
  for (idx, disk) in enumerate(disks):
2257
    disk.iv_name = "disk/%s" % (base_index + idx, )
2258

    
2259

    
2260
class LUInstanceSetParams(LogicalUnit):
2261
  """Modifies an instances's parameters.
2262

2263
  """
2264
  HPATH = "instance-modify"
2265
  HTYPE = constants.HTYPE_INSTANCE
2266
  REQ_BGL = False
2267

    
2268
  @staticmethod
2269
  def _UpgradeDiskNicMods(kind, mods, verify_fn):
2270
    assert ht.TList(mods)
2271
    assert not mods or len(mods[0]) in (2, 3)
2272

    
2273
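    # Old-style modifications are (operation, params) 2-tuples; convert them
    # to the new (operation, index, params) form, using index -1 for add and
    # remove, and treating any other first element as the identifier of an
    # item to modify.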
    if mods and len(mods[0]) == 2:
2274
      result = []
2275

    
2276
      addremove = 0
2277
      for op, params in mods:
2278
        if op in (constants.DDM_ADD, constants.DDM_REMOVE):
2279
          result.append((op, -1, params))
2280
          addremove += 1
2281

    
2282
          if addremove > 1:
2283
            raise errors.OpPrereqError("Only one %s add or remove operation is"
2284
                                       " supported at a time" % kind,
2285
                                       errors.ECODE_INVAL)
2286
        else:
2287
          result.append((constants.DDM_MODIFY, op, params))
2288

    
2289
      assert verify_fn(result)
2290
    else:
2291
      result = mods
2292

    
2293
    return result
2294

    
2295
  @staticmethod
2296
  def _CheckMods(kind, mods, key_types, item_fn):
2297
    """Ensures requested disk/NIC modifications are valid.
2298

2299
    """
2300
    for (op, _, params) in mods:
2301
      assert ht.TDict(params)
2302

    
2303
      # If 'key_types' is an empty dict, we assume we have an
2304
      # 'ext' template and thus do not ForceDictType
2305
      if key_types:
2306
        utils.ForceDictType(params, key_types)
2307

    
2308
      if op == constants.DDM_REMOVE:
2309
        if params:
2310
          raise errors.OpPrereqError("No settings should be passed when"
2311
                                     " removing a %s" % kind,
2312
                                     errors.ECODE_INVAL)
2313
      elif op in (constants.DDM_ADD, constants.DDM_MODIFY):
2314
        item_fn(op, params)
2315
      else:
2316
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2317

    
2318
  @staticmethod
2319
  def _VerifyDiskModification(op, params, excl_stor):
2320
    """Verifies a disk modification.
2321

2322
    """
2323
    if op == constants.DDM_ADD:
2324
      mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
2325
      if mode not in constants.DISK_ACCESS_SET:
2326
        raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
2327
                                   errors.ECODE_INVAL)
2328

    
2329
      size = params.get(constants.IDISK_SIZE, None)
2330
      if size is None:
2331
        raise errors.OpPrereqError("Required disk parameter '%s' missing" %
2332
                                   constants.IDISK_SIZE, errors.ECODE_INVAL)
2333
      size = int(size)
2334

    
2335
      params[constants.IDISK_SIZE] = size
2336
      name = params.get(constants.IDISK_NAME, None)
2337
      if name is not None and name.lower() == constants.VALUE_NONE:
2338
        params[constants.IDISK_NAME] = None
2339

    
2340
      CheckSpindlesExclusiveStorage(params, excl_stor, True)
2341

    
2342
    elif op == constants.DDM_MODIFY:
2343
      if constants.IDISK_SIZE in params:
2344
        raise errors.OpPrereqError("Disk size change not possible, use"
2345
                                   " grow-disk", errors.ECODE_INVAL)
2346
      if len(params) > 2:
2347
        raise errors.OpPrereqError("Disk modification doesn't support"
2348
                                   " additional arbitrary parameters",
2349
                                   errors.ECODE_INVAL)
2350
      name = params.get(constants.IDISK_NAME, None)
2351
      if name is not None and name.lower() == constants.VALUE_NONE:
2352
        params[constants.IDISK_NAME] = None
2353

    
2354
  @staticmethod
2355
  def _VerifyNicModification(op, params):
2356
    """Verifies a network interface modification.
2357

2358
    """
2359
    if op in (constants.DDM_ADD, constants.DDM_MODIFY):
2360
      ip = params.get(constants.INIC_IP, None)
2361
      name = params.get(constants.INIC_NAME, None)
2362
      req_net = params.get(constants.INIC_NETWORK, None)
2363
      link = params.get(constants.NIC_LINK, None)
2364
      mode = params.get(constants.NIC_MODE, None)
2365
      if name is not None and name.lower() == constants.VALUE_NONE:
2366
        params[constants.INIC_NAME] = None
2367
      if req_net is not None:
2368
        if req_net.lower() == constants.VALUE_NONE:
2369
          params[constants.INIC_NETWORK] = None
2370
          req_net = None
2371
        elif link is not None or mode is not None:
2372
          raise errors.OpPrereqError("If network is given"
2373
                                     " mode or link should not",
2374
                                     errors.ECODE_INVAL)
2375

    
2376
      if op == constants.DDM_ADD:
2377
        macaddr = params.get(constants.INIC_MAC, None)
2378
        if macaddr is None:
2379
          params[constants.INIC_MAC] = constants.VALUE_AUTO
2380

    
2381
      if ip is not None:
2382
        if ip.lower() == constants.VALUE_NONE:
2383
          params[constants.INIC_IP] = None
2384
        else:
2385
          if ip.lower() == constants.NIC_IP_POOL:
2386
            if op == constants.DDM_ADD and req_net is None:
2387
              raise errors.OpPrereqError("If ip=pool, parameter network"
2388
                                         " cannot be none",
2389
                                         errors.ECODE_INVAL)
2390
          else:
2391
            if not netutils.IPAddress.IsValid(ip):
2392
              raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
2393
                                         errors.ECODE_INVAL)
2394

    
2395
      if constants.INIC_MAC in params:
2396
        macaddr = params[constants.INIC_MAC]
2397
        if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2398
          macaddr = utils.NormalizeAndValidateMac(macaddr)
2399

    
2400
        if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
2401
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
2402
                                     " modifying an existing NIC",
2403
                                     errors.ECODE_INVAL)
2404

    
2405
  def CheckArguments(self):
2406
    if not (self.op.nics or self.op.disks or self.op.disk_template or
2407
            self.op.hvparams or self.op.beparams or self.op.os_name or
2408
            self.op.osparams or self.op.offline is not None or
2409
            self.op.runtime_mem or self.op.pnode):
2410
      raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
2411

    
2412
    if self.op.hvparams:
2413
      CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
2414
                           "hypervisor", "instance", "cluster")
2415

    
2416
    self.op.disks = self._UpgradeDiskNicMods(
2417
      "disk", self.op.disks, ht.TSetParamsMods(ht.TIDiskParams))
2418
    self.op.nics = self._UpgradeDiskNicMods(
2419
      "NIC", self.op.nics, ht.TSetParamsMods(ht.TINicParams))
2420

    
2421
    if self.op.disks and self.op.disk_template is not None:
2422
      raise errors.OpPrereqError("Disk template conversion and other disk"
2423
                                 " changes not supported at the same time",
2424
                                 errors.ECODE_INVAL)
2425

    
2426
    if (self.op.disk_template and
2427
        self.op.disk_template in constants.DTS_INT_MIRROR and
2428
        self.op.remote_node is None):
2429
      raise errors.OpPrereqError("Changing the disk template to a mirrored"
2430
                                 " one requires specifying a secondary node",
2431
                                 errors.ECODE_INVAL)
2432

    
2433
    # Check NIC modifications
2434
    self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
2435
                    self._VerifyNicModification)
2436

    
2437
    if self.op.pnode:
2438
      (self.op.pnode_uuid, self.op.pnode) = \
2439
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
2440

    
2441
  def ExpandNames(self):
2442
    self._ExpandAndLockInstance()
2443
    self.needed_locks[locking.LEVEL_NODEGROUP] = []
2444
    # Can't even acquire node locks in shared mode as upcoming changes in
2445
    # Ganeti 2.6 will start to modify the node object on disk conversion
2446
    self.needed_locks[locking.LEVEL_NODE] = []
2447
    self.needed_locks[locking.LEVEL_NODE_RES] = []
2448
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2449
    # Lock the node group in order to look up the ipolicy
2450
    self.share_locks[locking.LEVEL_NODEGROUP] = 1
2451

    
2452
  def DeclareLocks(self, level):
2453
    if level == locking.LEVEL_NODEGROUP:
2454
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
2455
      # Acquire locks for the instance's nodegroups optimistically. Needs
2456
      # to be verified in CheckPrereq
2457
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
2458
        self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
2459
    elif level == locking.LEVEL_NODE:
2460
      self._LockInstancesNodes()
2461
      if self.op.disk_template and self.op.remote_node:
2462
        (self.op.remote_node_uuid, self.op.remote_node) = \
2463
          ExpandNodeUuidAndName(self.cfg, self.op.remote_node_uuid,
2464
                                self.op.remote_node)
2465
        self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node_uuid)
2466
    elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
2467
      # Copy node locks
2468
      self.needed_locks[locking.LEVEL_NODE_RES] = \
2469
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
2470

    
2471
  def BuildHooksEnv(self):
2472
    """Build hooks env.
2473

2474
    This runs on the master, primary and secondaries.
2475

2476
    """
2477
    args = {}
2478
    if constants.BE_MINMEM in self.be_new:
2479
      args["minmem"] = self.be_new[constants.BE_MINMEM]
2480
    if constants.BE_MAXMEM in self.be_new:
2481
      args["maxmem"] = self.be_new[constants.BE_MAXMEM]
2482
    if constants.BE_VCPUS in self.be_new:
2483
      args["vcpus"] = self.be_new[constants.BE_VCPUS]
2484
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
2485
    # information at all.
2486

    
2487
    if self._new_nics is not None:
2488
      nics = []
2489

    
2490
      for nic in self._new_nics:
2491
        n = copy.deepcopy(nic)
2492
        nicparams = self.cluster.SimpleFillNIC(n.nicparams)
2493
        n.nicparams = nicparams
2494
        nics.append(NICToTuple(self, n))
2495

    
2496
      args["nics"] = nics
2497

    
2498
    env = BuildInstanceHookEnvByObject(self, self.instance, override=args)
2499
    if self.op.disk_template:
2500
      env["NEW_DISK_TEMPLATE"] = self.op.disk_template
2501
    if self.op.runtime_mem:
2502
      env["RUNTIME_MEMORY"] = self.op.runtime_mem
2503

    
2504
    return env
2505

    
2506
  def BuildHooksNodes(self):
2507
    """Build hooks nodes.
2508

2509
    """
2510
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
2511
    return (nl, nl)
2512

    
2513
  def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
                              old_params, cluster, pnode_uuid):

    update_params_dict = dict([(key, params[key])
                               for key in constants.NICS_PARAMETERS
                               if key in params])

    req_link = update_params_dict.get(constants.NIC_LINK, None)
    req_mode = update_params_dict.get(constants.NIC_MODE, None)

    new_net_uuid = None
    new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid)
    if new_net_uuid_or_name:
      new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name)
      new_net_obj = self.cfg.GetNetwork(new_net_uuid)

    if old_net_uuid:
      old_net_obj = self.cfg.GetNetwork(old_net_uuid)

    if new_net_uuid:
      netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode_uuid)
      if not netparams:
        raise errors.OpPrereqError("No netparams found for the network"
                                   " %s, probably not connected" %
                                   new_net_obj.name, errors.ECODE_INVAL)
      new_params = dict(netparams)
    else:
      new_params = GetUpdatedParams(old_params, update_params_dict)

    utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)

    new_filled_params = cluster.SimpleFillNIC(new_params)
    objects.NIC.CheckParameterSyntax(new_filled_params)

    new_mode = new_filled_params[constants.NIC_MODE]
    if new_mode == constants.NIC_MODE_BRIDGED:
      bridge = new_filled_params[constants.NIC_LINK]
      msg = self.rpc.call_bridges_exist(pnode_uuid, [bridge]).fail_msg
      if msg:
        msg = "Error checking bridges on node '%s': %s" % \
                (self.cfg.GetNodeName(pnode_uuid), msg)
        if self.op.force:
          self.warn.append(msg)
        else:
          raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)

    elif new_mode == constants.NIC_MODE_ROUTED:
      ip = params.get(constants.INIC_IP, old_ip)
      if ip is None:
        raise errors.OpPrereqError("Cannot set the NIC IP address to None"
                                   " on a routed NIC", errors.ECODE_INVAL)

    elif new_mode == constants.NIC_MODE_OVS:
      # TODO: check OVS link
      self.LogInfo("OVS links are currently not checked for correctness")

    if constants.INIC_MAC in params:
      mac = params[constants.INIC_MAC]
      if mac is None:
        raise errors.OpPrereqError("Cannot unset the NIC MAC address",
                                   errors.ECODE_INVAL)
      elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        # otherwise generate the MAC address
        params[constants.INIC_MAC] = \
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
      else:
        # or validate/reserve the current one
        try:
          self.cfg.ReserveMAC(mac, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("MAC address '%s' already in use"
                                     " in cluster" % mac,
                                     errors.ECODE_NOTUNIQUE)
    elif new_net_uuid != old_net_uuid:

      def get_net_prefix(net_uuid):
        mac_prefix = None
        if net_uuid:
          nobj = self.cfg.GetNetwork(net_uuid)
          mac_prefix = nobj.mac_prefix

        return mac_prefix

      new_prefix = get_net_prefix(new_net_uuid)
      old_prefix = get_net_prefix(old_net_uuid)
      if old_prefix != new_prefix:
        params[constants.INIC_MAC] = \
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())

    # if there is a change in (ip, network) tuple
    new_ip = params.get(constants.INIC_IP, old_ip)
    if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
      if new_ip:
        # if IP is pool then require a network and generate one IP
        if new_ip.lower() == constants.NIC_IP_POOL:
          if new_net_uuid:
            try:
              new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("Unable to get a free IP"
                                         " from the address pool",
                                         errors.ECODE_STATE)
            self.LogInfo("Chose IP %s from network %s",
                         new_ip,
                         new_net_obj.name)
            params[constants.INIC_IP] = new_ip
          else:
            raise errors.OpPrereqError("ip=pool, but no network found",
                                       errors.ECODE_INVAL)
        # Reserve new IP if in the new network if any
        elif new_net_uuid:
          try:
            self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId())
            self.LogInfo("Reserving IP %s in network %s",
                         new_ip, new_net_obj.name)
          except errors.ReservationError:
            raise errors.OpPrereqError("IP %s not available in network %s" %
                                       (new_ip, new_net_obj.name),
                                       errors.ECODE_NOTUNIQUE)
        # new network is None so check if new IP is a conflicting IP
        elif self.op.conflicts_check:
          _CheckForConflictingIp(self, new_ip, pnode_uuid)

      # release old IP if old network is not None
      if old_ip and old_net_uuid:
        try:
          self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId())
        except errors.AddressPoolError:
          logging.warning("Release IP %s not contained in network %s",
                          old_ip, old_net_obj.name)

    # there are no changes in (ip, network) tuple and old network is not None
    elif (old_net_uuid is not None and
          (req_link is not None or req_mode is not None)):
      raise errors.OpPrereqError("Not allowed to change link or mode of"
                                 " a NIC that is connected to a network",
                                 errors.ECODE_INVAL)

    private.params = new_params
    private.filled = new_filled_params

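  # Descriptive note (added): _PrepareNicModification only validates and
  # reserves resources (MACs, IPs); its outcome is stored on the per-item
  # private slot: private.params holds the raw updated nicparams and
  # private.filled the cluster-filled version later consumed by
  # _CreateNewNic/_ApplyNicMods. As a purely illustrative example (values
  # made up, not taken from this file), a modification might carry params
  # such as {constants.INIC_MAC: constants.VALUE_GENERATE,
  #          constants.INIC_NETWORK: "net1", constants.INIC_IP: "pool"}.
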
  def _PreCheckDiskTemplate(self, pnode_info):
    """CheckPrereq checks related to a new disk template."""
    # Arguments are passed to avoid configuration lookups
    pnode_uuid = self.instance.primary_node
    if self.instance.disk_template == self.op.disk_template:
      raise errors.OpPrereqError("Instance already has disk template %s" %
                                 self.instance.disk_template,
                                 errors.ECODE_INVAL)

    if not self.cluster.IsDiskTemplateEnabled(self.op.disk_template):
      raise errors.OpPrereqError("Disk template '%s' is not enabled for this"
                                 " cluster." % self.op.disk_template)

    if (self.instance.disk_template,
        self.op.disk_template) not in self._DISK_CONVERSIONS:
      raise errors.OpPrereqError("Unsupported disk template conversion from"
                                 " %s to %s" % (self.instance.disk_template,
                                                self.op.disk_template),
                                 errors.ECODE_INVAL)
    CheckInstanceState(self, self.instance, INSTANCE_DOWN,
                       msg="cannot change disk template")
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.remote_node_uuid == pnode_uuid:
        raise errors.OpPrereqError("Given new secondary node %s is the same"
                                   " as the primary node of the instance" %
                                   self.op.remote_node, errors.ECODE_STATE)
      CheckNodeOnline(self, self.op.remote_node_uuid)
      CheckNodeNotDrained(self, self.op.remote_node_uuid)
      # FIXME: here we assume that the old instance type is DT_PLAIN
      assert self.instance.disk_template == constants.DT_PLAIN
      disks = [{constants.IDISK_SIZE: d.size,
                constants.IDISK_VG: d.logical_id[0]}
               for d in self.instance.disks]
      required = ComputeDiskSizePerVG(self.op.disk_template, disks)
      CheckNodesFreeDiskPerVG(self, [self.op.remote_node_uuid], required)

      snode_info = self.cfg.GetNodeInfo(self.op.remote_node_uuid)
      snode_group = self.cfg.GetNodeGroup(snode_info.group)
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
                                                              snode_group)
      CheckTargetNodeIPolicy(self, ipolicy, self.instance, snode_info, self.cfg,
                             ignore=self.op.ignore_ipolicy)
      if pnode_info.group != snode_info.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    if not self.op.disk_template in constants.DTS_EXCL_STORAGE:
      # Make sure none of the nodes require exclusive storage
      nodes = [pnode_info]
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        assert snode_info
        nodes.append(snode_info)
      has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
      if compat.any(map(has_es, nodes)):
        errmsg = ("Cannot convert disk template from %s to %s when exclusive"
                  " storage is enabled" % (self.instance.disk_template,
                                           self.op.disk_template))
        raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)

  def _PreCheckDisks(self, ispec):
    """CheckPrereq checks related to disk changes.

    @type ispec: dict
    @param ispec: instance specs to be updated with the new disks

    """
    self.diskparams = self.cfg.GetInstanceDiskParams(self.instance)

    excl_stor = compat.any(
      rpc.GetExclusiveStorageForNodes(self.cfg,
                                      self.instance.all_nodes).values()
      )

    # Check disk modifications. This is done here and not in CheckArguments
    # (as with NICs), because we need to know the instance's disk template
    ver_fn = lambda op, par: self._VerifyDiskModification(op, par, excl_stor)
    if self.instance.disk_template == constants.DT_EXT:
      self._CheckMods("disk", self.op.disks, {}, ver_fn)
    else:
      self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
                      ver_fn)

    self.diskmod = _PrepareContainerMods(self.op.disks, None)

    # Check the validity of the `provider' parameter
    if self.instance.disk_template in constants.DT_EXT:
      for mod in self.diskmod:
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
        if mod[0] == constants.DDM_ADD:
          if ext_provider is None:
            raise errors.OpPrereqError("Instance template is '%s' and parameter"
                                       " '%s' missing, during disk add" %
                                       (constants.DT_EXT,
                                        constants.IDISK_PROVIDER),
                                       errors.ECODE_NOENT)
        elif mod[0] == constants.DDM_MODIFY:
          if ext_provider:
            raise errors.OpPrereqError("Parameter '%s' is invalid during disk"
                                       " modification" %
                                       constants.IDISK_PROVIDER,
                                       errors.ECODE_INVAL)
    else:
      for mod in self.diskmod:
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
        if ext_provider is not None:
          raise errors.OpPrereqError("Parameter '%s' is only valid for"
                                     " instances of type '%s'" %
                                     (constants.IDISK_PROVIDER,
                                      constants.DT_EXT),
                                     errors.ECODE_INVAL)

    if not self.op.wait_for_sync and self.instance.disks_active:
      for mod in self.diskmod:
        if mod[0] == constants.DDM_ADD:
          raise errors.OpPrereqError("Can't add a disk to an instance with"
                                     " activated disks and"
                                     " --no-wait-for-sync given.",
                                     errors.ECODE_INVAL)

    if self.op.disks and self.instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Disk operations not supported for"
                                 " diskless instances", errors.ECODE_INVAL)

    def _PrepareDiskMod(_, disk, params, __):
      disk.name = params.get(constants.IDISK_NAME, None)

    # Verify disk changes (operating on a copy)
    disks = copy.deepcopy(self.instance.disks)
    _ApplyContainerMods("disk", disks, None, self.diskmod, None,
                        _PrepareDiskMod, None)
    utils.ValidateDeviceNames("disk", disks)
    if len(disks) > constants.MAX_DISKS:
      raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
                                 " more" % constants.MAX_DISKS,
                                 errors.ECODE_STATE)
    disk_sizes = [disk.size for disk in self.instance.disks]
    disk_sizes.extend(params["size"] for (op, idx, params, private) in
                      self.diskmod if op == constants.DDM_ADD)
    ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
    ispec[constants.ISPEC_DISK_SIZE] = disk_sizes

    if self.op.offline is not None and self.op.offline:
      CheckInstanceState(self, self.instance, CAN_CHANGE_INSTANCE_OFFLINE,
                         msg="can't change to offline")

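  # Descriptive note (added): self.op.disks (and self.op.nics) are lists of
  # (operation, identifier, params) modifications; _PrepareContainerMods
  # attaches a per-item private slot, which is why entries are unpacked as
  # (op, idx, params, private) above. A purely illustrative, made-up example
  # of a single "add a 1 GB disk" entry as consumed here might look like
  # (constants.DDM_ADD, -1, {constants.IDISK_SIZE: 1024}, None), assuming -1
  # is used to append at the end.
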
  def CheckPrereq(self):
    """Check prerequisites.

    This checks the requested parameter changes against the instance and
    cluster configuration.

    """
    assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    self.cluster = self.cfg.GetClusterInfo()
    cluster_hvparams = self.cluster.hvparams[self.instance.hypervisor]

    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    pnode_uuid = self.instance.primary_node

    self.warn = []

    if (self.op.pnode_uuid is not None and self.op.pnode_uuid != pnode_uuid and
        not self.op.force):
      # verify that the instance is not up
      instance_info = self.rpc.call_instance_info(
          pnode_uuid, self.instance.name, self.instance.hypervisor,
          cluster_hvparams)
      if instance_info.fail_msg:
        self.warn.append("Can't get instance runtime information: %s" %
                         instance_info.fail_msg)
      elif instance_info.payload:
        raise errors.OpPrereqError("Instance is still running on %s" %
                                   self.cfg.GetNodeName(pnode_uuid),
                                   errors.ECODE_STATE)

    assert pnode_uuid in self.owned_locks(locking.LEVEL_NODE)
    node_uuids = list(self.instance.all_nodes)
    pnode_info = self.cfg.GetNodeInfo(pnode_uuid)

    #_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
    assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
    group_info = self.cfg.GetNodeGroup(pnode_info.group)

    # dictionary with instance information after the modification
    ispec = {}

    if self.op.hotplug:
      result = self.rpc.call_hotplug_supported(self.instance.primary_node,
                                               self.instance)
      result.Raise("Hotplug is not supported.")

    # Prepare NIC modifications
    self.nicmod = _PrepareContainerMods(self.op.nics, _InstNicModPrivate)

    # OS change
    if self.op.os_name and not self.op.force:
      CheckNodeHasOS(self, self.instance.primary_node, self.op.os_name,
                     self.op.force_variant)
      instance_os = self.op.os_name
    else:
      instance_os = self.instance.os

    assert not (self.op.disk_template and self.op.disks), \
      "Can't modify disk template and apply disk changes at the same time"

    if self.op.disk_template:
      self._PreCheckDiskTemplate(pnode_info)

    self._PreCheckDisks(ispec)

    # hvparams processing
    if self.op.hvparams:
      hv_type = self.instance.hypervisor
      i_hvdict = GetUpdatedParams(self.instance.hvparams, self.op.hvparams)
      utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
      hv_new = self.cluster.SimpleFillHV(hv_type, self.instance.os, i_hvdict)

      # local check
      hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
      CheckHVParams(self, node_uuids, self.instance.hypervisor, hv_new)
      self.hv_proposed = self.hv_new = hv_new # the new actual values
      self.hv_inst = i_hvdict # the new dict (without defaults)
    else:
      self.hv_proposed = self.cluster.SimpleFillHV(self.instance.hypervisor,
                                                   self.instance.os,
                                                   self.instance.hvparams)
      self.hv_new = self.hv_inst = {}

    # beparams processing
    if self.op.beparams:
      i_bedict = GetUpdatedParams(self.instance.beparams, self.op.beparams,
                                  use_none=True)
      objects.UpgradeBeParams(i_bedict)
      utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
      be_new = self.cluster.SimpleFillBE(i_bedict)
      self.be_proposed = self.be_new = be_new # the new actual values
      self.be_inst = i_bedict # the new dict (without defaults)
    else:
      self.be_new = self.be_inst = {}
      self.be_proposed = self.cluster.SimpleFillBE(self.instance.beparams)
    be_old = self.cluster.FillBE(self.instance)

    # CPU param validation -- checking every time a parameter is
    # changed to cover all cases where either CPU mask or vcpus have
    # changed
    if (constants.BE_VCPUS in self.be_proposed and
        constants.HV_CPU_MASK in self.hv_proposed):
      cpu_list = \
        utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
      # Verify mask is consistent with number of vCPUs. Can skip this
      # test if only 1 entry in the CPU mask, which means same mask
      # is applied to all vCPUs.
      if (len(cpu_list) > 1 and
          len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
        raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
                                   " CPU mask [%s]" %
                                   (self.be_proposed[constants.BE_VCPUS],
                                    self.hv_proposed[constants.HV_CPU_MASK]),
                                   errors.ECODE_INVAL)

      # Only perform this test if a new CPU mask is given
      if constants.HV_CPU_MASK in self.hv_new:
        # Calculate the largest CPU number requested
        max_requested_cpu = max(map(max, cpu_list))
        # Check that all of the instance's nodes have enough physical CPUs to
        # satisfy the requested CPU mask
        hvspecs = [(self.instance.hypervisor,
                    self.cfg.GetClusterInfo()
                      .hvparams[self.instance.hypervisor])]
        _CheckNodesPhysicalCPUs(self, self.instance.all_nodes,
                                max_requested_cpu + 1,
                                hvspecs)

    # osparams processing
    if self.op.osparams:
      i_osdict = GetUpdatedParams(self.instance.osparams, self.op.osparams)
      CheckOSParams(self, True, node_uuids, instance_os, i_osdict)
      self.os_inst = i_osdict # the new dict (without defaults)
    else:
      self.os_inst = {}

    #TODO(dynmem): do the appropriate check involving MINMEM
    if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
        be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
      mem_check_list = [pnode_uuid]
      if be_new[constants.BE_AUTO_BALANCE]:
        # either we changed auto_balance to yes or it was from before
        mem_check_list.extend(self.instance.secondary_nodes)
      instance_info = self.rpc.call_instance_info(
          pnode_uuid, self.instance.name, self.instance.hypervisor,
          cluster_hvparams)
      hvspecs = [(self.instance.hypervisor,
                  cluster_hvparams)]
      nodeinfo = self.rpc.call_node_info(mem_check_list, None,
                                         hvspecs)
      pninfo = nodeinfo[pnode_uuid]
      msg = pninfo.fail_msg
      if msg:
        # Assume the primary node is unreachable and go ahead
        self.warn.append("Can't get info from primary node %s: %s" %
                         (self.cfg.GetNodeName(pnode_uuid), msg))
      else:
        (_, _, (pnhvinfo, )) = pninfo.payload
        if not isinstance(pnhvinfo.get("memory_free", None), int):
          self.warn.append("Node data from primary node %s doesn't contain"
                           " free memory information" %
                           self.cfg.GetNodeName(pnode_uuid))
        elif instance_info.fail_msg:
          self.warn.append("Can't get instance runtime information: %s" %
                           instance_info.fail_msg)
        else:
          if instance_info.payload:
            current_mem = int(instance_info.payload["memory"])
          else:
            # Assume instance not running
            # (there is a slight race condition here, but it's not very
            # probable, and we have no other way to check)
            # TODO: Describe race condition
            current_mem = 0
          #TODO(dynmem): do the appropriate check involving MINMEM
          miss_mem = (be_new[constants.BE_MAXMEM] - current_mem -
                      pnhvinfo["memory_free"])
          if miss_mem > 0:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from starting, due to %d MB of memory"
                                       " missing on its primary node" %
                                       miss_mem, errors.ECODE_NORES)

      if be_new[constants.BE_AUTO_BALANCE]:
        for node_uuid, nres in nodeinfo.items():
          if node_uuid not in self.instance.secondary_nodes:
            continue
          nres.Raise("Can't get info from secondary node %s" %
                     self.cfg.GetNodeName(node_uuid), prereq=True,
                     ecode=errors.ECODE_STATE)
          (_, _, (nhvinfo, )) = nres.payload
          if not isinstance(nhvinfo.get("memory_free", None), int):
            raise errors.OpPrereqError("Secondary node %s didn't return free"
                                       " memory information" %
                                       self.cfg.GetNodeName(node_uuid),
                                       errors.ECODE_STATE)
          #TODO(dynmem): do the appropriate check involving MINMEM
          elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from failover to its secondary node"
                                       " %s, due to not enough memory" %
                                       self.cfg.GetNodeName(node_uuid),
                                       errors.ECODE_STATE)

    if self.op.runtime_mem:
      remote_info = self.rpc.call_instance_info(
         self.instance.primary_node, self.instance.name,
         self.instance.hypervisor,
         cluster_hvparams)
      remote_info.Raise("Error checking node %s" %
                        self.cfg.GetNodeName(self.instance.primary_node))
      if not remote_info.payload: # not running already
        raise errors.OpPrereqError("Instance %s is not running" %
                                   self.instance.name, errors.ECODE_STATE)

      current_memory = remote_info.payload["memory"]
      if (not self.op.force and
           (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
            self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
        raise errors.OpPrereqError("Instance %s must have memory between %d"
                                   " and %d MB of memory unless --force is"
                                   " given" %
                                   (self.instance.name,
                                    self.be_proposed[constants.BE_MINMEM],
                                    self.be_proposed[constants.BE_MAXMEM]),
                                   errors.ECODE_INVAL)

      delta = self.op.runtime_mem - current_memory
      if delta > 0:
        CheckNodeFreeMemory(
            self, self.instance.primary_node,
            "ballooning memory for instance %s" % self.instance.name, delta,
            self.instance.hypervisor,
            self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])

    # make self.cluster visible in the functions below
    cluster = self.cluster

    def _PrepareNicCreate(_, params, private):
      self._PrepareNicModification(params, private, None, None,
                                   {}, cluster, pnode_uuid)
      return (None, None)

    def _PrepareNicMod(_, nic, params, private):
      self._PrepareNicModification(params, private, nic.ip, nic.network,
                                   nic.nicparams, cluster, pnode_uuid)
      return None

    def _PrepareNicRemove(_, params, __):
      ip = params.ip
      net = params.network
      if net is not None and ip is not None:
        self.cfg.ReleaseIp(net, ip, self.proc.GetECId())

    # Verify NIC changes (operating on copy)
    nics = self.instance.nics[:]
    _ApplyContainerMods("NIC", nics, None, self.nicmod,
                        _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
    if len(nics) > constants.MAX_NICS:
      raise errors.OpPrereqError("Instance has too many network interfaces"
                                 " (%d), cannot add more" % constants.MAX_NICS,
                                 errors.ECODE_STATE)

    # Pre-compute NIC changes (necessary to use result in hooks)
    self._nic_chgdesc = []
    if self.nicmod:
      # Operate on copies as this is still in prereq
      nics = [nic.Copy() for nic in self.instance.nics]
      _ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
                          self._CreateNewNic, self._ApplyNicMods,
                          self._RemoveNic)
      # Verify that NIC names are unique and valid
      utils.ValidateDeviceNames("NIC", nics)
      self._new_nics = nics
      ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
    else:
      self._new_nics = None
      ispec[constants.ISPEC_NIC_COUNT] = len(self.instance.nics)

    if not self.op.ignore_ipolicy:
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
                                                              group_info)

      # Fill ispec with backend parameters
      ispec[constants.ISPEC_SPINDLE_USE] = \
        self.be_new.get(constants.BE_SPINDLE_USE, None)
      ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS,
                                                         None)

      # Copy ispec to verify parameters with min/max values separately
      if self.op.disk_template:
        new_disk_template = self.op.disk_template
      else:
        new_disk_template = self.instance.disk_template
      ispec_max = ispec.copy()
      ispec_max[constants.ISPEC_MEM_SIZE] = \
        self.be_new.get(constants.BE_MAXMEM, None)
      res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,
                                                     new_disk_template)
      ispec_min = ispec.copy()
      ispec_min[constants.ISPEC_MEM_SIZE] = \
        self.be_new.get(constants.BE_MINMEM, None)
      res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,
                                                     new_disk_template)

      if (res_max or res_min):
        # FIXME: Improve error message by including information about whether
        # the upper or lower limit of the parameter fails the ipolicy.
        msg = ("Instance allocation to group %s (%s) violates policy: %s" %
               (group_info, group_info.name,
                utils.CommaJoin(set(res_max + res_min))))
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

  def _ConvertPlainToDrbd(self, feedback_fn):
    """Converts an instance from plain to drbd.

    """
    feedback_fn("Converting template to drbd")
    pnode_uuid = self.instance.primary_node
    snode_uuid = self.op.remote_node_uuid

    assert self.instance.disk_template == constants.DT_PLAIN

    # create a fake disk info for _GenerateDiskTemplate
    disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
                  constants.IDISK_VG: d.logical_id[0],
                  constants.IDISK_NAME: d.name}
                 for d in self.instance.disks]
    new_disks = GenerateDiskTemplate(self, self.op.disk_template,
                                     self.instance.uuid, pnode_uuid,
                                     [snode_uuid], disk_info, None, None, 0,
                                     feedback_fn, self.diskparams)
    anno_disks = rpc.AnnotateDiskParams(new_disks, self.diskparams)
    p_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, pnode_uuid)
    s_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, snode_uuid)
    info = GetInstanceInfoText(self.instance)
    feedback_fn("Creating additional volumes...")
    # first, create the missing data and meta devices
    for disk in anno_disks:
      # unfortunately this is... not too nice
      CreateSingleBlockDev(self, pnode_uuid, self.instance, disk.children[1],
                           info, True, p_excl_stor)
      for child in disk.children:
        CreateSingleBlockDev(self, snode_uuid, self.instance, child, info, True,
                             s_excl_stor)
    # at this stage, all new LVs have been created, we can rename the
    # old ones
    feedback_fn("Renaming original volumes...")
    rename_list = [(o, n.children[0].logical_id)
                   for (o, n) in zip(self.instance.disks, new_disks)]
    result = self.rpc.call_blockdev_rename(pnode_uuid, rename_list)
    result.Raise("Failed to rename original LVs")

    feedback_fn("Initializing DRBD devices...")
    # all child devices are in place, we can now create the DRBD devices
    try:
      for disk in anno_disks:
        for (node_uuid, excl_stor) in [(pnode_uuid, p_excl_stor),
                                       (snode_uuid, s_excl_stor)]:
          f_create = node_uuid == pnode_uuid
          CreateSingleBlockDev(self, node_uuid, self.instance, disk, info,
                               f_create, excl_stor)
    except errors.GenericError, e:
      feedback_fn("Initializing of DRBD devices failed;"
                  " renaming back original volumes...")
      rename_back_list = [(n.children[0], o.logical_id)
                          for (n, o) in zip(new_disks, self.instance.disks)]
      result = self.rpc.call_blockdev_rename(pnode_uuid, rename_back_list)
      result.Raise("Failed to rename LVs back after error %s" % str(e))
      raise

    # at this point, the instance has been modified
    self.instance.disk_template = constants.DT_DRBD8
    self.instance.disks = new_disks
    self.cfg.Update(self.instance, feedback_fn)

    # Release node locks while waiting for sync
    ReleaseLocks(self, locking.LEVEL_NODE)

    # disks are created, waiting for sync
    disk_abort = not WaitForSync(self, self.instance,
                                 oneshot=not self.op.wait_for_sync)
    if disk_abort:
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance, please cleanup manually")

    # Node resource locks will be released by caller

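  # Descriptive note (added): the plain->drbd conversion above only renames
  # the original LVs, so if creating the DRBD devices fails they are renamed
  # back and the exception is re-raised, leaving the instance on its plain
  # disks; the extra data/meta volumes created for the failed attempt are not
  # removed by this error path.
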
  def _ConvertDrbdToPlain(self, feedback_fn):
    """Converts an instance from drbd to plain.

    """
    assert len(self.instance.secondary_nodes) == 1
    assert self.instance.disk_template == constants.DT_DRBD8

    pnode_uuid = self.instance.primary_node
    snode_uuid = self.instance.secondary_nodes[0]
    feedback_fn("Converting template to plain")

    old_disks = AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
    new_disks = [d.children[0] for d in self.instance.disks]

    # copy over size, mode and name
    for parent, child in zip(old_disks, new_disks):
      child.size = parent.size
      child.mode = parent.mode
      child.name = parent.name

    # this is a DRBD disk, return its port to the pool
    # NOTE: this must be done right before the call to cfg.Update!
    for disk in old_disks:
      tcp_port = disk.logical_id[2]
      self.cfg.AddTcpUdpPort(tcp_port)

    # update instance structure
    self.instance.disks = new_disks
    self.instance.disk_template = constants.DT_PLAIN
    _UpdateIvNames(0, self.instance.disks)
    self.cfg.Update(self.instance, feedback_fn)

    # Release locks in case removing disks takes a while
    ReleaseLocks(self, locking.LEVEL_NODE)

    feedback_fn("Removing volumes on the secondary node...")
    for disk in old_disks:
      result = self.rpc.call_blockdev_remove(snode_uuid, (disk, self.instance))
      result.Warn("Could not remove block device %s on node %s,"
                  " continuing anyway" %
                  (disk.iv_name, self.cfg.GetNodeName(snode_uuid)),
                  self.LogWarning)

    feedback_fn("Removing unneeded volumes on the primary node...")
    for idx, disk in enumerate(old_disks):
      meta = disk.children[1]
      result = self.rpc.call_blockdev_remove(pnode_uuid, (meta, self.instance))
      result.Warn("Could not remove metadata for disk %d on node %s,"
                  " continuing anyway" %
                  (idx, self.cfg.GetNodeName(pnode_uuid)),
                  self.LogWarning)

  def _HotplugDevice(self, action, dev_type, device, extra, seq):
    self.LogInfo("Trying to hotplug device...")
    msg = "hotplug:"
    result = self.rpc.call_hotplug_device(self.instance.primary_node,
                                          self.instance, action, dev_type,
                                          (device, self.instance),
                                          extra, seq)
    if result.fail_msg:
      self.LogWarning("Could not hotplug device: %s" % result.fail_msg)
      self.LogInfo("Continuing execution..")
      msg += "failed"
    else:
      self.LogInfo("Hotplug done.")
      msg += "done"
    return msg

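  # Descriptive note (added): a failed hotplug never aborts the operation;
  # _HotplugDevice only logs a warning and reports "hotplug:failed" (or
  # "hotplug:done" on success) in the change description, while the
  # configuration update itself still proceeds.
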
  def _CreateNewDisk(self, idx, params, _):
    """Creates a new disk.

    """
    # add a new disk
    if self.instance.disk_template in constants.DTS_FILEBASED:
      (file_driver, file_path) = self.instance.disks[0].logical_id
      file_path = os.path.dirname(file_path)
    else:
      file_driver = file_path = None

    disk = \
      GenerateDiskTemplate(self, self.instance.disk_template,
                           self.instance.uuid, self.instance.primary_node,
                           self.instance.secondary_nodes, [params], file_path,
                           file_driver, idx, self.Log, self.diskparams)[0]

    new_disks = CreateDisks(self, self.instance, disks=[disk])

    if self.cluster.prealloc_wipe_disks:
      # Wipe new disk
      WipeOrCleanupDisks(self, self.instance,
                         disks=[(idx, disk, 0)],
                         cleanup=new_disks)

    changes = [
      ("disk/%d" % idx,
       "add:size=%s,mode=%s" % (disk.size, disk.mode)),
      ]
    if self.op.hotplug:
      result = self.rpc.call_blockdev_assemble(self.instance.primary_node,
                                               (disk, self.instance),
                                               self.instance.name, True, idx)
      if result.fail_msg:
        changes.append(("disk/%d" % idx, "assemble:failed"))
        self.LogWarning("Can't assemble newly created disk %d: %s",
                        idx, result.fail_msg)
      else:
        _, link_name = result.payload
        msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
                                  constants.HOTPLUG_TARGET_DISK,
                                  disk, link_name, idx)
        changes.append(("disk/%d" % idx, msg))

    return (disk, changes)

  def _PostAddDisk(self, _, disk):
    if not WaitForSync(self, self.instance, disks=[disk],
                       oneshot=not self.op.wait_for_sync):
      raise errors.OpExecError("Failed to sync disks of %s" %
                               self.instance.name)

    # the disk is active at this point, so deactivate it if the instance disks
    # are supposed to be inactive
    if not self.instance.disks_active:
      ShutdownInstanceDisks(self, self.instance, disks=[disk])

  @staticmethod
  def _ModifyDisk(idx, disk, params, _):
    """Modifies a disk.

    """
    changes = []
    mode = params.get(constants.IDISK_MODE, None)
    if mode:
      disk.mode = mode
      changes.append(("disk.mode/%d" % idx, disk.mode))

    name = params.get(constants.IDISK_NAME, None)
    disk.name = name
    changes.append(("disk.name/%d" % idx, disk.name))

    return changes

  def _RemoveDisk(self, idx, root, _):
    """Removes a disk.

    """
    hotmsg = ""
    if self.op.hotplug:
      hotmsg = self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
                                   constants.HOTPLUG_TARGET_DISK,
                                   root, None, idx)
      ShutdownInstanceDisks(self, self.instance, [root])

    (anno_disk,) = AnnotateDiskParams(self.instance, [root], self.cfg)
    for node_uuid, disk in anno_disk.ComputeNodeTree(
                             self.instance.primary_node):
      msg = self.rpc.call_blockdev_remove(node_uuid, (disk, self.instance)) \
              .fail_msg
      if msg:
        self.LogWarning("Could not remove disk/%d on node '%s': %s,"
                        " continuing anyway", idx,
                        self.cfg.GetNodeName(node_uuid), msg)

    # if this is a DRBD disk, return its port to the pool
    if root.dev_type in constants.DTS_DRBD:
      self.cfg.AddTcpUdpPort(root.logical_id[2])

    return hotmsg

  def _CreateNewNic(self, idx, params, private):
    """Creates data structure for a new network interface.

    """
    mac = params[constants.INIC_MAC]
    ip = params.get(constants.INIC_IP, None)
    net = params.get(constants.INIC_NETWORK, None)
    name = params.get(constants.INIC_NAME, None)
    net_uuid = self.cfg.LookupNetwork(net)
    #TODO: not private.filled?? can a nic have no nicparams??
    nicparams = private.filled
    nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name,
                       nicparams=nicparams)
    nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())

    changes = [
      ("nic.%d" % idx,
       "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
       (mac, ip, private.filled[constants.NIC_MODE],
       private.filled[constants.NIC_LINK], net)),
      ]

    if self.op.hotplug:
      msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
                                constants.HOTPLUG_TARGET_NIC,
                                nobj, None, idx)
      changes.append(("nic.%d" % idx, msg))

    return (nobj, changes)

  def _ApplyNicMods(self, idx, nic, params, private):
    """Modifies a network interface.

    """
    changes = []

    for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:
      if key in params:
        changes.append(("nic.%s/%d" % (key, idx), params[key]))
        setattr(nic, key, params[key])

    new_net = params.get(constants.INIC_NETWORK, nic.network)
    new_net_uuid = self.cfg.LookupNetwork(new_net)
    if new_net_uuid != nic.network:
      changes.append(("nic.network/%d" % idx, new_net))
      nic.network = new_net_uuid

    if private.filled:
      nic.nicparams = private.filled

      for (key, val) in nic.nicparams.items():
        changes.append(("nic.%s/%d" % (key, idx), val))

    if self.op.hotplug:
      msg = self._HotplugDevice(constants.HOTPLUG_ACTION_MODIFY,
                                constants.HOTPLUG_TARGET_NIC,
                                nic, None, idx)
      changes.append(("nic/%d" % idx, msg))

    return changes

  def _RemoveNic(self, idx, nic, _):
    if self.op.hotplug:
      return self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
                                 constants.HOTPLUG_TARGET_NIC,
                                 nic, None, idx)

  def Exec(self, feedback_fn):
    """Modifies an instance.

    All parameters take effect only at the next restart of the instance.

    """
    # Process here the warnings from CheckPrereq, as we don't have a
    # feedback_fn there.
    # TODO: Replace with self.LogWarning
    for warn in self.warn:
      feedback_fn("WARNING: %s" % warn)

    assert ((self.op.disk_template is None) ^
            bool(self.owned_locks(locking.LEVEL_NODE_RES))), \
      "Not owning any node resource locks"

    result = []

    # New primary node
    if self.op.pnode_uuid:
      self.instance.primary_node = self.op.pnode_uuid

    # runtime memory
    if self.op.runtime_mem:
      rpcres = self.rpc.call_instance_balloon_memory(self.instance.primary_node,
                                                     self.instance,
                                                     self.op.runtime_mem)
      rpcres.Raise("Cannot modify instance runtime memory")
      result.append(("runtime_memory", self.op.runtime_mem))

    # Apply disk changes
    _ApplyContainerMods("disk", self.instance.disks, result, self.diskmod,
                        self._CreateNewDisk, self._ModifyDisk,
                        self._RemoveDisk, post_add_fn=self._PostAddDisk)
    _UpdateIvNames(0, self.instance.disks)

    if self.op.disk_template:
      if __debug__:
        check_nodes = set(self.instance.all_nodes)
        if self.op.remote_node_uuid:
          check_nodes.add(self.op.remote_node_uuid)
        for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
          owned = self.owned_locks(level)
          assert not (check_nodes - owned), \
            ("Not owning the correct locks, owning %r, expected at least %r" %
             (owned, check_nodes))

      r_shut = ShutdownInstanceDisks(self, self.instance)
      if not r_shut:
        raise errors.OpExecError("Cannot shutdown instance disks, unable to"
                                 " proceed with disk template conversion")
      mode = (self.instance.disk_template, self.op.disk_template)
      try:
        self._DISK_CONVERSIONS[mode](self, feedback_fn)
      except:
        self.cfg.ReleaseDRBDMinors(self.instance.uuid)
        raise
      result.append(("disk_template", self.op.disk_template))

      assert self.instance.disk_template == self.op.disk_template, \
        ("Expected disk template '%s', found '%s'" %
         (self.op.disk_template, self.instance.disk_template))

    # Release node and resource locks if there are any (they might already have
    # been released during disk conversion)
    ReleaseLocks(self, locking.LEVEL_NODE)
    ReleaseLocks(self, locking.LEVEL_NODE_RES)

    # Apply NIC changes
    if self._new_nics is not None:
      self.instance.nics = self._new_nics
      result.extend(self._nic_chgdesc)

    # hvparams changes
    if self.op.hvparams:
      self.instance.hvparams = self.hv_inst
      for key, val in self.op.hvparams.iteritems():
        result.append(("hv/%s" % key, val))

    # beparams changes
    if self.op.beparams:
      self.instance.beparams = self.be_inst
      for key, val in self.op.beparams.iteritems():
        result.append(("be/%s" % key, val))

    # OS change
    if self.op.os_name:
      self.instance.os = self.op.os_name

    # osparams changes
    if self.op.osparams:
      self.instance.osparams = self.os_inst
      for key, val in self.op.osparams.iteritems():
        result.append(("os/%s" % key, val))

    if self.op.offline is None:
      # Ignore
      pass
    elif self.op.offline:
      # Mark instance as offline
      self.cfg.MarkInstanceOffline(self.instance.uuid)
      result.append(("admin_state", constants.ADMINST_OFFLINE))
    else:
      # Mark instance as online, but stopped
      self.cfg.MarkInstanceDown(self.instance.uuid)
      result.append(("admin_state", constants.ADMINST_DOWN))

    self.cfg.Update(self.instance, feedback_fn, self.proc.GetECId())

    assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
                self.owned_locks(locking.LEVEL_NODE)), \
      "All node locks should have been released by now"

    return result

  _DISK_CONVERSIONS = {
    (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
    (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
    }
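
  # Descriptive note (added): _DISK_CONVERSIONS maps an
  # (old_template, new_template) pair to the conversion routine that Exec()
  # dispatches to after shutting down the instance's disks; only the
  # plain<->drbd directions are supported here. A hypothetical new conversion
  # would be registered as another (constants.DT_*, constants.DT_*) entry
  # pointing at its helper method.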


class LUInstanceChangeGroup(LogicalUnit):
  HPATH = "instance-change-group"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self.share_locks = ShareAll()

    self.needed_locks = {
      locking.LEVEL_NODEGROUP: [],
      locking.LEVEL_NODE: [],
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
      }

    self._ExpandAndLockInstance()

    if self.op.target_groups:
      self.req_target_uuids = map(self.cfg.LookupNodeGroup,
                                  self.op.target_groups)
    else:
      self.req_target_uuids = None

    self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]

      if self.req_target_uuids:
        lock_groups = set(self.req_target_uuids)

        # Lock all groups used by instance optimistically; this requires going
        # via the node before it's locked, requiring verification later on
        instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
        lock_groups.update(instance_groups)
      else:
        # No target groups, need to lock all of them
        lock_groups = locking.ALL_SET

      self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups

    elif level == locking.LEVEL_NODE:
      if self.req_target_uuids:
        # Lock all nodes used by instances
        self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
        self._LockInstancesNodes()

        # Lock all nodes in all potential target groups
        lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
                       self.cfg.GetInstanceNodeGroups(self.op.instance_uuid))
        member_nodes = [node_uuid
                        for group in lock_groups
                        for node_uuid in self.cfg.GetNodeGroup(group).members]
        self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
      else:
        # Lock all nodes as all groups are potential targets
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    owned_instance_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))

    assert (self.req_target_uuids is None or
            owned_groups.issuperset(self.req_target_uuids))
    assert owned_instance_names == set([self.op.instance_name])

    # Get instance information
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)

    # Check if node groups for locked instance are still correct
    assert owned_nodes.issuperset(self.instance.all_nodes), \
      ("Instance %s's nodes changed while we kept the lock" %
       self.op.instance_name)

    inst_groups = CheckInstanceNodeGroups(self.cfg, self.op.instance_uuid,
                                          owned_groups)

    if self.req_target_uuids:
      # User requested specific target groups
      self.target_uuids = frozenset(self.req_target_uuids)
    else:
      # All groups except those used by the instance are potential targets
      self.target_uuids = owned_groups - inst_groups

    conflicting_groups = self.target_uuids & inst_groups
    if conflicting_groups:
      raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
                                 " used by the instance '%s'" %
                                 (utils.CommaJoin(conflicting_groups),
                                  self.op.instance_name),
                                 errors.ECODE_INVAL)

    if not self.target_uuids:
      raise errors.OpPrereqError("There are no possible target groups",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    assert self.target_uuids

    env = {
      "TARGET_GROUPS": " ".join(self.target_uuids),
      }

    env.update(BuildInstanceHookEnvByObject(self, self.instance))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()
    return ([mn], [mn])

  def Exec(self, feedback_fn):
    instances = list(self.owned_locks(locking.LEVEL_INSTANCE))

    assert instances == [self.op.instance_name], "Instance not locked"

    req = iallocator.IAReqGroupChange(instances=instances,
                                      target_groups=list(self.target_uuids))
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute solution for changing group of"
                                 " instance '%s' using iallocator '%s': %s" %
                                 (self.op.instance_name, self.op.iallocator,
                                  ial.info), errors.ECODE_NORES)

    jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False)

    self.LogInfo("Iallocator returned %s job(s) for changing group of"
                 " instance '%s'", len(jobs), self.op.instance_name)

    return ResultWithJobs(jobs)