lib/cmdlib/instance.py @ 24711492

1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Logical units dealing with instances."""
23

    
24
import OpenSSL
25
import copy
26
import logging
27
import os
28

    
29
from ganeti import compat
30
from ganeti import constants
31
from ganeti import errors
32
from ganeti import ht
33
from ganeti import hypervisor
34
from ganeti import locking
35
from ganeti.masterd import iallocator
36
from ganeti import masterd
37
from ganeti import netutils
38
from ganeti import objects
39
from ganeti import pathutils
40
from ganeti import rpc
41
from ganeti import utils
42

    
43
from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs
44

    
45
from ganeti.cmdlib.common import INSTANCE_DOWN, \
46
  INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, CheckNodeOnline, \
47
  ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
48
  LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
49
  IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
50
  AnnotateDiskParams, GetUpdatedParams, ExpandInstanceUuidAndName, \
51
  ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeUuidAndName, \
52
  CheckDiskTemplateEnabled, IsValidDiskAccessModeCombination
53
from ganeti.cmdlib.instance_storage import CreateDisks, \
54
  CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, WaitForSync, \
55
  IsExclusiveStorageEnabledNodeUuid, CreateSingleBlockDev, ComputeDisks, \
56
  CheckRADOSFreeSpace, ComputeDiskSizePerVG, GenerateDiskTemplate, \
57
  StartInstanceDisks, ShutdownInstanceDisks, AssembleInstanceDisks, \
58
  CheckSpindlesExclusiveStorage
59
from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
60
  GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
61
  NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
62
  ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
63
  GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
64
  CheckInstanceBridgesExist, CheckNicsBridgesExist, CheckNodeHasOS
65

    
66
import ganeti.masterd.instance
67

    
68

    
69
#: Type description for changes as returned by L{_ApplyContainerMods}'s
70
#: callbacks
71
_TApplyContModsCbChanges = \
72
  ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
73
    ht.TNonEmptyString,
74
    ht.TAny,
75
    ])))
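
# Editor's note (illustrative, not from the original source): a value accepted
# by _TApplyContModsCbChanges is either None or a list of two-item pairs whose
# first element is a non-empty string and whose second may be of any type,
# e.g. (hypothetical values):
#
#   [("mode", constants.NIC_MODE_BRIDGED), ("link", "br0")]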
76

    
77

    
78
def _CheckHostnameSane(lu, name):
79
  """Ensures that a given hostname resolves to a 'sane' name.
80

81
  The given name is required to be a prefix of the resolved hostname,
82
  to prevent accidental mismatches.
83

84
  @param lu: the logical unit on behalf of which we're checking
85
  @param name: the name we should resolve and check
86
  @return: the resolved hostname object
87

88
  """
89
  hostname = netutils.GetHostname(name=name)
90
  if hostname.name != name:
91
    lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name)
92
  if not utils.MatchNameComponent(name, [hostname.name]):
93
    raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
94
                                " same as given hostname '%s'") %
95
                               (hostname.name, name), errors.ECODE_INVAL)
96
  return hostname
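
# Editor's note (illustrative sketch, hypothetical names): if the short name
# "web1" resolves to "web1.example.com",
#
#   _CheckHostnameSane(lu, "web1")   # logs the resolution and returns the
#                                    # resolved Hostname object
#
# whereas a name whose resolution it is not a prefix of (say it resolved to
# "mail.example.com") fails MatchNameComponent and raises OpPrereqError.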
97

    
98

    
99
def _CheckOpportunisticLocking(op):
100
  """Generate error if opportunistic locking is not possible.
101

102
  """
103
  if op.opportunistic_locking and not op.iallocator:
104
    raise errors.OpPrereqError("Opportunistic locking is only available in"
105
                               " combination with an instance allocator",
106
                               errors.ECODE_INVAL)
107

    
108

    
109
def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_name_whitelist):
110
  """Wrapper around IAReqInstanceAlloc.
111

112
  @param op: The instance opcode
113
  @param disks: The computed disks
114
  @param nics: The computed nics
115
  @param beparams: The fully filled beparams
116
  @param node_name_whitelist: List of nodes which should appear as online to the
117
    allocator (unless the node is already marked offline)
118

119
  @returns: A filled L{iallocator.IAReqInstanceAlloc}
120

121
  """
122
  spindle_use = beparams[constants.BE_SPINDLE_USE]
123
  return iallocator.IAReqInstanceAlloc(name=op.instance_name,
124
                                       disk_template=op.disk_template,
125
                                       tags=op.tags,
126
                                       os=op.os_type,
127
                                       vcpus=beparams[constants.BE_VCPUS],
128
                                       memory=beparams[constants.BE_MAXMEM],
129
                                       spindle_use=spindle_use,
130
                                       disks=disks,
131
                                       nics=[n.ToDict() for n in nics],
132
                                       hypervisor=op.hypervisor,
133
                                       node_whitelist=node_name_whitelist)
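
# Editor's sketch (hypothetical values): with beparams already filled by
# _ComputeFullBeParams, e.g. BE_VCPUS=2, BE_MAXMEM=2048, BE_SPINDLE_USE=1, the
# request forwards vcpus=2, memory=2048 and spindle_use=1 to the allocator,
# together with the computed disks and the NICs serialized via ToDict().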
134

    
135

    
136
def _ComputeFullBeParams(op, cluster):
137
  """Computes the full beparams.
138

139
  @param op: The instance opcode
140
  @param cluster: The cluster config object
141

142
  @return: The fully filled beparams
143

144
  """
145
  default_beparams = cluster.beparams[constants.PP_DEFAULT]
146
  for param, value in op.beparams.iteritems():
147
    if value == constants.VALUE_AUTO:
148
      op.beparams[param] = default_beparams[param]
149
  objects.UpgradeBeParams(op.beparams)
150
  utils.ForceDictType(op.beparams, constants.BES_PARAMETER_TYPES)
151
  return cluster.SimpleFillBE(op.beparams)
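
# Editor's sketch (hypothetical values): an opcode carrying
# {BE_VCPUS: "auto", BE_MAXMEM: 2048} first has "auto" replaced by the cluster
# default for vcpus, is then upgraded to the minmem/maxmem format by
# objects.UpgradeBeParams, type-checked by ForceDictType, and finally merged
# over the cluster defaults by SimpleFillBE.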
152

    
153

    
154
def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
155
  """Computes the nics.
156

157
  @param op: The instance opcode
158
  @param cluster: Cluster configuration object
159
  @param default_ip: The default ip to assign
160
  @param cfg: An instance of the configuration object
161
  @param ec_id: Execution context ID
162

163
  @returns: The list of built-up NIC objects
164

165
  """
166
  nics = []
167
  for nic in op.nics:
168
    nic_mode_req = nic.get(constants.INIC_MODE, None)
169
    nic_mode = nic_mode_req
170
    if nic_mode is None or nic_mode == constants.VALUE_AUTO:
171
      nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]
172

    
173
    net = nic.get(constants.INIC_NETWORK, None)
174
    link = nic.get(constants.NIC_LINK, None)
175
    ip = nic.get(constants.INIC_IP, None)
176
    vlan = nic.get(constants.INIC_VLAN, None)
177

    
178
    if net is None or net.lower() == constants.VALUE_NONE:
179
      net = None
180
    else:
181
      if nic_mode_req is not None or link is not None:
182
        raise errors.OpPrereqError("If network is given, no mode or link"
183
                                   " is allowed to be passed",
184
                                   errors.ECODE_INVAL)
185

    
186
    if vlan is not None and nic_mode != constants.NIC_MODE_OVS:
187
      raise errors.OpPrereqError("VLAN is given, but network mode is not"
188
                                 " openvswitch", errors.ECODE_INVAL)
189

    
190
    # ip validity checks
191
    if ip is None or ip.lower() == constants.VALUE_NONE:
192
      nic_ip = None
193
    elif ip.lower() == constants.VALUE_AUTO:
194
      if not op.name_check:
195
        raise errors.OpPrereqError("IP address set to auto but name checks"
196
                                   " have been skipped",
197
                                   errors.ECODE_INVAL)
198
      nic_ip = default_ip
199
    else:
200
      # We defer pool operations until later, so that the iallocator has
201
      # filled in the instance's node(s)
202
      if ip.lower() == constants.NIC_IP_POOL:
203
        if net is None:
204
          raise errors.OpPrereqError("if ip=pool, parameter network"
205
                                     " must be passed too",
206
                                     errors.ECODE_INVAL)
207

    
208
      elif not netutils.IPAddress.IsValid(ip):
209
        raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
210
                                   errors.ECODE_INVAL)
211

    
212
      nic_ip = ip
213

    
214
    # TODO: check the ip address for uniqueness
215
    if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
216
      raise errors.OpPrereqError("Routed nic mode requires an ip address",
217
                                 errors.ECODE_INVAL)
218

    
219
    # MAC address verification
220
    mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
221
    if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
222
      mac = utils.NormalizeAndValidateMac(mac)
223

    
224
      try:
225
        # TODO: We need to factor this out
226
        cfg.ReserveMAC(mac, ec_id)
227
      except errors.ReservationError:
228
        raise errors.OpPrereqError("MAC address %s already in use"
229
                                   " in cluster" % mac,
230
                                   errors.ECODE_NOTUNIQUE)
231

    
232
    #  Build nic parameters
233
    nicparams = {}
234
    if nic_mode_req:
235
      nicparams[constants.NIC_MODE] = nic_mode
236
    if link:
237
      nicparams[constants.NIC_LINK] = link
238
    if vlan:
239
      nicparams[constants.NIC_VLAN] = vlan
240

    
241
    check_params = cluster.SimpleFillNIC(nicparams)
242
    objects.NIC.CheckParameterSyntax(check_params)
243
    net_uuid = cfg.LookupNetwork(net)
244
    name = nic.get(constants.INIC_NAME, None)
245
    if name is not None and name.lower() == constants.VALUE_NONE:
246
      name = None
247
    nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name,
248
                          network=net_uuid, nicparams=nicparams)
249
    nic_obj.uuid = cfg.GenerateUniqueID(ec_id)
250
    nics.append(nic_obj)
251

    
252
  return nics
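
# Editor's sketch (hypothetical values) of the NIC specifications handled above:
#
#   {constants.INIC_IP: "auto"}              -> uses default_ip (requires
#                                               op.name_check)
#   {constants.INIC_IP: "pool",
#    constants.INIC_NETWORK: "net1"}         -> IP reserved later from the pool
#   {constants.INIC_IP: "none"}              -> NIC without an IP address
#   {constants.INIC_MAC: "auto"}             -> MAC generated later, at instance
#                                               creation time
#
# Each resulting objects.NIC receives a fresh UUID via cfg.GenerateUniqueID.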
253

    
254

    
255
def _CheckForConflictingIp(lu, ip, node_uuid):
256
  """In case of conflicting IP address raise error.
257

258
  @type ip: string
259
  @param ip: IP address
260
  @type node_uuid: string
261
  @param node_uuid: node UUID
262

263
  """
264
  (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node_uuid)
265
  if conf_net is not None:
266
    raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
267
                                " network %s, but the target NIC does not." %
268
                                (ip, conf_net)),
269
                               errors.ECODE_STATE)
270

    
271
  return (None, None)
272

    
273

    
274
def _ComputeIPolicyInstanceSpecViolation(
275
  ipolicy, instance_spec, disk_template,
276
  _compute_fn=ComputeIPolicySpecViolation):
277
  """Compute if instance specs meets the specs of ipolicy.
278

279
  @type ipolicy: dict
280
  @param ipolicy: The ipolicy to verify against
281
  @type instance_spec: dict
282
  @param instance_spec: The instance spec to verify
283
  @type disk_template: string
284
  @param disk_template: the disk template of the instance
285
  @param _compute_fn: The function to verify ipolicy (unittest only)
286
  @see: L{ComputeIPolicySpecViolation}
287

288
  """
289
  mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
290
  cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
291
  disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
292
  disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
293
  nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
294
  spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)
295

    
296
  return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
297
                     disk_sizes, spindle_use, disk_template)
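
# Editor's sketch (hypothetical values): an instance_spec such as
#
#   {ISPEC_MEM_SIZE: 2048, ISPEC_CPU_COUNT: 2, ISPEC_DISK_COUNT: 1,
#    ISPEC_DISK_SIZE: [10240], ISPEC_NIC_COUNT: 1, ISPEC_SPINDLE_USE: 1}
#
# is unpacked into the individual arguments of ComputeIPolicySpecViolation;
# missing keys default to None (memory, cpu, spindle use) or to 0/empty
# (disk and NIC counts, disk sizes).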
298

    
299

    
300
def _CheckOSVariant(os_obj, name):
301
  """Check whether an OS name conforms to the os variants specification.
302

303
  @type os_obj: L{objects.OS}
304
  @param os_obj: OS object to check
305
  @type name: string
306
  @param name: OS name passed by the user, to check for validity
307

308
  """
309
  variant = objects.OS.GetVariant(name)
310
  if not os_obj.supported_variants:
311
    if variant:
312
      raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
313
                                 " passed)" % (os_obj.name, variant),
314
                                 errors.ECODE_INVAL)
315
    return
316
  if not variant:
317
    raise errors.OpPrereqError("OS name must include a variant",
318
                               errors.ECODE_INVAL)
319

    
320
  if variant not in os_obj.supported_variants:
321
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)
322

    
323

    
324
class LUInstanceCreate(LogicalUnit):
325
  """Create an instance.
326

327
  """
328
  HPATH = "instance-add"
329
  HTYPE = constants.HTYPE_INSTANCE
330
  REQ_BGL = False
331

    
332
  def _CheckDiskTemplateValid(self):
333
    """Checks validity of disk template.
334

335
    """
336
    cluster = self.cfg.GetClusterInfo()
337
    if self.op.disk_template is None:
338
      # FIXME: It would be better to take the default disk template from the
339
      # ipolicy, but for the ipolicy we need the primary node, which we get from
340
      # the iallocator, which wants the disk template as input. To solve this
341
      # chicken-and-egg problem, it should be possible to specify just a node
342
      # group from the iallocator and take the ipolicy from that.
343
      self.op.disk_template = cluster.enabled_disk_templates[0]
344
    CheckDiskTemplateEnabled(cluster, self.op.disk_template)
345

    
346
  def _CheckDiskArguments(self):
347
    """Checks validity of disk-related arguments.
348

349
    """
350
    # check that disk's names are unique and valid
351
    utils.ValidateDeviceNames("disk", self.op.disks)
352

    
353
    self._CheckDiskTemplateValid()
354

    
355
    # check disks. parameter names and consistent adopt/no-adopt strategy
356
    has_adopt = has_no_adopt = False
357
    for disk in self.op.disks:
358
      if self.op.disk_template != constants.DT_EXT:
359
        utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
360
      if constants.IDISK_ADOPT in disk:
361
        has_adopt = True
362
      else:
363
        has_no_adopt = True
364
    if has_adopt and has_no_adopt:
365
      raise errors.OpPrereqError("Either all disks are adopted or none is",
366
                                 errors.ECODE_INVAL)
367
    if has_adopt:
368
      if self.op.disk_template not in constants.DTS_MAY_ADOPT:
369
        raise errors.OpPrereqError("Disk adoption is not supported for the"
370
                                   " '%s' disk template" %
371
                                   self.op.disk_template,
372
                                   errors.ECODE_INVAL)
373
      if self.op.iallocator is not None:
374
        raise errors.OpPrereqError("Disk adoption not allowed with an"
375
                                   " iallocator script", errors.ECODE_INVAL)
376
      if self.op.mode == constants.INSTANCE_IMPORT:
377
        raise errors.OpPrereqError("Disk adoption not allowed for"
378
                                   " instance import", errors.ECODE_INVAL)
379
    else:
380
      if self.op.disk_template in constants.DTS_MUST_ADOPT:
381
        raise errors.OpPrereqError("Disk template %s requires disk adoption,"
382
                                   " but no 'adopt' parameter given" %
383
                                   self.op.disk_template,
384
                                   errors.ECODE_INVAL)
385

    
386
    self.adopt_disks = has_adopt
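
  # Editor's sketch (hypothetical values): disk specifications must either all
  # carry an "adopt" entry, e.g.
  #
  #   [{constants.IDISK_SIZE: 10240, constants.IDISK_ADOPT: "xenvg/existing-lv"}]
  #
  # which reuses existing volumes (only for templates in DTS_MAY_ADOPT, without
  # an iallocator and not on import; the size is later updated from the detected
  # volume), or none of them, in which case fresh disks are created.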
387

    
388
  def _CheckVLANArguments(self):
389
    """ Check validity of VLANs if given
390

391
    """
392
    for nic in self.op.nics:
393
      vlan = nic.get(constants.INIC_VLAN, None)
394
      if vlan:
395
        if vlan[0] == ".":
396
          # vlan starting with dot means single untagged vlan,
397
          # might be followed by trunk (:)
398
          if not vlan[1:].isdigit():
399
            vlanlist = vlan[1:].split(':')
400
            for vl in vlanlist:
401
              if not vl.isdigit():
402
                raise errors.OpPrereqError("Specified VLAN parameter is "
403
                                           "invalid : %s" % vlan,
404
                                           errors.ECODE_INVAL)
405
        elif vlan[0] == ":":
406
          # Trunk - tagged only
407
          vlanlist = vlan[1:].split(':')
408
          for vl in vlanlist:
409
            if not vl.isdigit():
410
              raise errors.OpPrereqError("Specified VLAN parameter is invalid"
411
                                           " : %s" % vlan, errors.ECODE_INVAL)
412
        elif vlan.isdigit():
413
          # This is the simplest case. No dots, only single digit
414
          # -> Create untagged access port, dot needs to be added
415
          nic[constants.INIC_VLAN] = "." + vlan
416
        else:
417
          raise errors.OpPrereqError("Specified VLAN parameter is invalid"
418
                                       " : %s" % vlan, errors.ECODE_INVAL)
419

    
420
  def CheckArguments(self):
421
    """Check arguments.
422

423
    """
424
    # do not require name_check to ease forward/backward compatibility
425
    # for tools
426
    if self.op.no_install and self.op.start:
427
      self.LogInfo("No-installation mode selected, disabling startup")
428
      self.op.start = False
429
    # validate/normalize the instance name
430
    self.op.instance_name = \
431
      netutils.Hostname.GetNormalizedName(self.op.instance_name)
432

    
433
    if self.op.ip_check and not self.op.name_check:
434
      # TODO: make the ip check more flexible and not depend on the name check
435
      raise errors.OpPrereqError("Cannot do IP address check without a name"
436
                                 " check", errors.ECODE_INVAL)
437

    
438
    # check nics' parameter names
439
    for nic in self.op.nics:
440
      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
441
    # check that NIC's parameters names are unique and valid
442
    utils.ValidateDeviceNames("NIC", self.op.nics)
443

    
444
    self._CheckVLANArguments()
445

    
446
    self._CheckDiskArguments()
447
    assert self.op.disk_template is not None
448

    
449
    # instance name verification
450
    if self.op.name_check:
451
      self.hostname = _CheckHostnameSane(self, self.op.instance_name)
452
      self.op.instance_name = self.hostname.name
453
      # used in CheckPrereq for ip ping check
454
      self.check_ip = self.hostname.ip
455
    else:
456
      self.check_ip = None
457

    
458
    # file storage checks
459
    if (self.op.file_driver and
460
        not self.op.file_driver in constants.FILE_DRIVER):
461
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
462
                                 self.op.file_driver, errors.ECODE_INVAL)
463

    
464
    # set default file_driver if unset and required
465
    if (not self.op.file_driver and
466
        self.op.disk_template in [constants.DT_FILE,
467
                                  constants.DT_SHARED_FILE]):
468
      self.op.file_driver = constants.FD_LOOP
469

    
470
    ### Node/iallocator related checks
471
    CheckIAllocatorOrNode(self, "iallocator", "pnode")
472

    
473
    if self.op.pnode is not None:
474
      if self.op.disk_template in constants.DTS_INT_MIRROR:
475
        if self.op.snode is None:
476
          raise errors.OpPrereqError("The networked disk templates need"
477
                                     " a mirror node", errors.ECODE_INVAL)
478
      elif self.op.snode:
479
        self.LogWarning("Secondary node will be ignored on non-mirrored disk"
480
                        " template")
481
        self.op.snode = None
482

    
483
    _CheckOpportunisticLocking(self.op)
484

    
485
    if self.op.mode == constants.INSTANCE_IMPORT:
486
      # On import force_variant must be True, because if we forced it at
487
      # initial install, our only chance when importing it back is that it
488
      # works again!
489
      self.op.force_variant = True
490

    
491
      if self.op.no_install:
492
        self.LogInfo("No-installation mode has no effect during import")
493

    
494
    elif self.op.mode == constants.INSTANCE_CREATE:
495
      if self.op.os_type is None:
496
        raise errors.OpPrereqError("No guest OS specified",
497
                                   errors.ECODE_INVAL)
498
      if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
499
        raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
500
                                   " installation" % self.op.os_type,
501
                                   errors.ECODE_STATE)
502
    elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
503
      self._cds = GetClusterDomainSecret()
504

    
505
      # Check handshake to ensure both clusters have the same domain secret
506
      src_handshake = self.op.source_handshake
507
      if not src_handshake:
508
        raise errors.OpPrereqError("Missing source handshake",
509
                                   errors.ECODE_INVAL)
510

    
511
      errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
512
                                                           src_handshake)
513
      if errmsg:
514
        raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
515
                                   errors.ECODE_INVAL)
516

    
517
      # Load and check source CA
518
      self.source_x509_ca_pem = self.op.source_x509_ca
519
      if not self.source_x509_ca_pem:
520
        raise errors.OpPrereqError("Missing source X509 CA",
521
                                   errors.ECODE_INVAL)
522

    
523
      try:
524
        (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
525
                                                    self._cds)
526
      except OpenSSL.crypto.Error, err:
527
        raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
528
                                   (err, ), errors.ECODE_INVAL)
529

    
530
      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
531
      if errcode is not None:
532
        raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
533
                                   errors.ECODE_INVAL)
534

    
535
      self.source_x509_ca = cert
536

    
537
      src_instance_name = self.op.source_instance_name
538
      if not src_instance_name:
539
        raise errors.OpPrereqError("Missing source instance name",
540
                                   errors.ECODE_INVAL)
541

    
542
      self.source_instance_name = \
543
        netutils.GetHostname(name=src_instance_name).name
544

    
545
    else:
546
      raise errors.OpPrereqError("Invalid instance creation mode %r" %
547
                                 self.op.mode, errors.ECODE_INVAL)
548

    
549
  def ExpandNames(self):
550
    """ExpandNames for CreateInstance.
551

552
    Figure out the right locks for instance creation.
553

554
    """
555
    self.needed_locks = {}
556

    
557
    # this is just a preventive check, but someone might still add this
558
    # instance in the meantime, and creation will fail at lock-add time
559
    if self.op.instance_name in\
560
      [inst.name for inst in self.cfg.GetAllInstancesInfo().values()]:
561
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
562
                                 self.op.instance_name, errors.ECODE_EXISTS)
563

    
564
    self.add_locks[locking.LEVEL_INSTANCE] = self.op.instance_name
565

    
566
    if self.op.iallocator:
567
      # TODO: Find a solution to not lock all nodes in the cluster, e.g. by
568
      # specifying a group on instance creation and then selecting nodes from
569
      # that group
570
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
571
      self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
572

    
573
      if self.op.opportunistic_locking:
574
        self.opportunistic_locks[locking.LEVEL_NODE] = True
575
        self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
576
    else:
577
      (self.op.pnode_uuid, self.op.pnode) = \
578
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
579
      nodelist = [self.op.pnode_uuid]
580
      if self.op.snode is not None:
581
        (self.op.snode_uuid, self.op.snode) = \
582
          ExpandNodeUuidAndName(self.cfg, self.op.snode_uuid, self.op.snode)
583
        nodelist.append(self.op.snode_uuid)
584
      self.needed_locks[locking.LEVEL_NODE] = nodelist
585

    
586
    # in case of import lock the source node too
587
    if self.op.mode == constants.INSTANCE_IMPORT:
588
      src_node = self.op.src_node
589
      src_path = self.op.src_path
590

    
591
      if src_path is None:
592
        self.op.src_path = src_path = self.op.instance_name
593

    
594
      if src_node is None:
595
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
596
        self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
597
        self.op.src_node = None
598
        if os.path.isabs(src_path):
599
          raise errors.OpPrereqError("Importing an instance from a path"
600
                                     " requires a source node option",
601
                                     errors.ECODE_INVAL)
602
      else:
603
        (self.op.src_node_uuid, self.op.src_node) = (_, src_node) = \
604
          ExpandNodeUuidAndName(self.cfg, self.op.src_node_uuid, src_node)
605
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
606
          self.needed_locks[locking.LEVEL_NODE].append(self.op.src_node_uuid)
607
        if not os.path.isabs(src_path):
608
          self.op.src_path = \
609
            utils.PathJoin(pathutils.EXPORT_DIR, src_path)
610

    
611
    self.needed_locks[locking.LEVEL_NODE_RES] = \
612
      CopyLockList(self.needed_locks[locking.LEVEL_NODE])
613

    
614
  def _RunAllocator(self):
615
    """Run the allocator based on input opcode.
616

617
    """
618
    if self.op.opportunistic_locking:
619
      # Only consider nodes for which a lock is held
620
      node_name_whitelist = self.cfg.GetNodeNames(
621
        self.owned_locks(locking.LEVEL_NODE))
622
    else:
623
      node_name_whitelist = None
624

    
625
    req = _CreateInstanceAllocRequest(self.op, self.disks,
626
                                      self.nics, self.be_full,
627
                                      node_name_whitelist)
628
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)
629

    
630
    ial.Run(self.op.iallocator)
631

    
632
    if not ial.success:
633
      # When opportunistic locks are used only a temporary failure is generated
634
      if self.op.opportunistic_locking:
635
        ecode = errors.ECODE_TEMP_NORES
636
      else:
637
        ecode = errors.ECODE_NORES
638

    
639
      raise errors.OpPrereqError("Can't compute nodes using"
640
                                 " iallocator '%s': %s" %
641
                                 (self.op.iallocator, ial.info),
642
                                 ecode)
643

    
644
    (self.op.pnode_uuid, self.op.pnode) = \
645
      ExpandNodeUuidAndName(self.cfg, None, ial.result[0])
646
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
647
                 self.op.instance_name, self.op.iallocator,
648
                 utils.CommaJoin(ial.result))
649

    
650
    assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator"
651

    
652
    if req.RequiredNodes() == 2:
653
      (self.op.snode_uuid, self.op.snode) = \
654
        ExpandNodeUuidAndName(self.cfg, None, ial.result[1])
655

    
656
  def BuildHooksEnv(self):
657
    """Build hooks env.
658

659
    This runs on master, primary and secondary nodes of the instance.
660

661
    """
662
    env = {
663
      "ADD_MODE": self.op.mode,
664
      }
665
    if self.op.mode == constants.INSTANCE_IMPORT:
666
      env["SRC_NODE"] = self.op.src_node
667
      env["SRC_PATH"] = self.op.src_path
668
      env["SRC_IMAGES"] = self.src_images
669

    
670
    env.update(BuildInstanceHookEnv(
671
      name=self.op.instance_name,
672
      primary_node_name=self.op.pnode,
673
      secondary_node_names=self.cfg.GetNodeNames(self.secondaries),
674
      status=self.op.start,
675
      os_type=self.op.os_type,
676
      minmem=self.be_full[constants.BE_MINMEM],
677
      maxmem=self.be_full[constants.BE_MAXMEM],
678
      vcpus=self.be_full[constants.BE_VCPUS],
679
      nics=NICListToTuple(self, self.nics),
680
      disk_template=self.op.disk_template,
681
      disks=[(d[constants.IDISK_NAME], d.get("uuid", ""),
682
              d[constants.IDISK_SIZE], d[constants.IDISK_MODE])
683
             for d in self.disks],
684
      bep=self.be_full,
685
      hvp=self.hv_full,
686
      hypervisor_name=self.op.hypervisor,
687
      tags=self.op.tags,
688
      ))
689

    
690
    return env
691

    
692
  def BuildHooksNodes(self):
693
    """Build hooks nodes.
694

695
    """
696
    nl = [self.cfg.GetMasterNode(), self.op.pnode_uuid] + self.secondaries
697
    return nl, nl
698

    
699
  def _ReadExportInfo(self):
700
    """Reads the export information from disk.
701

702
    It will override the opcode source node and path with the actual
703
    information, if these two were not specified before.
704

705
    @return: the export information
706

707
    """
708
    assert self.op.mode == constants.INSTANCE_IMPORT
709

    
710
    if self.op.src_node_uuid is None:
711
      locked_nodes = self.owned_locks(locking.LEVEL_NODE)
712
      exp_list = self.rpc.call_export_list(locked_nodes)
713
      found = False
714
      for node_uuid in exp_list:
715
        if exp_list[node_uuid].fail_msg:
716
          continue
717
        if self.op.src_path in exp_list[node_uuid].payload:
718
          found = True
719
          self.op.src_node = self.cfg.GetNodeInfo(node_uuid).name
720
          self.op.src_node_uuid = node_uuid
721
          self.op.src_path = utils.PathJoin(pathutils.EXPORT_DIR,
722
                                            self.op.src_path)
723
          break
724
      if not found:
725
        raise errors.OpPrereqError("No export found for relative path %s" %
726
                                   self.op.src_path, errors.ECODE_INVAL)
727

    
728
    CheckNodeOnline(self, self.op.src_node_uuid)
729
    result = self.rpc.call_export_info(self.op.src_node_uuid, self.op.src_path)
730
    result.Raise("No export or invalid export found in dir %s" %
731
                 self.op.src_path)
732

    
733
    export_info = objects.SerializableConfigParser.Loads(str(result.payload))
734
    if not export_info.has_section(constants.INISECT_EXP):
735
      raise errors.ProgrammerError("Corrupted export config",
736
                                   errors.ECODE_ENVIRON)
737

    
738
    ei_version = export_info.get(constants.INISECT_EXP, "version")
739
    if int(ei_version) != constants.EXPORT_VERSION:
740
      raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
741
                                 (ei_version, constants.EXPORT_VERSION),
742
                                 errors.ECODE_ENVIRON)
743
    return export_info
744

    
745
  def _ReadExportParams(self, einfo):
746
    """Use export parameters as defaults.
747

748
    If the opcode does not specify (i.e. override) some instance
749
    parameters, then try to use them from the export information, if
750
    that declares them.
751

752
    """
753
    self.op.os_type = einfo.get(constants.INISECT_EXP, "os")
754

    
755
    if not self.op.disks:
756
      disks = []
757
      # TODO: import the disk iv_name too
758
      for idx in range(constants.MAX_DISKS):
759
        if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
760
          disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
761
          disks.append({constants.IDISK_SIZE: disk_sz})
762
      self.op.disks = disks
763
      if not disks and self.op.disk_template != constants.DT_DISKLESS:
764
        raise errors.OpPrereqError("No disk info specified and the export"
765
                                   " is missing the disk information",
766
                                   errors.ECODE_INVAL)
767

    
768
    if not self.op.nics:
769
      nics = []
770
      for idx in range(constants.MAX_NICS):
771
        if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
772
          ndict = {}
773
          for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
774
            nic_param_name = "nic%d_%s" % (idx, name)
775
            if einfo.has_option(constants.INISECT_INS, nic_param_name):
776
              v = einfo.get(constants.INISECT_INS, nic_param_name)
777
              ndict[name] = v
778
          nics.append(ndict)
779
        else:
780
          break
781
      self.op.nics = nics
782

    
783
    if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
784
      self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()
785

    
786
    if (self.op.hypervisor is None and
787
        einfo.has_option(constants.INISECT_INS, "hypervisor")):
788
      self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")
789

    
790
    if einfo.has_section(constants.INISECT_HYP):
791
      # use the export parameters but do not override the ones
792
      # specified by the user
793
      for name, value in einfo.items(constants.INISECT_HYP):
794
        if name not in self.op.hvparams:
795
          self.op.hvparams[name] = value
796

    
797
    if einfo.has_section(constants.INISECT_BEP):
798
      # use the parameters, without overriding
799
      for name, value in einfo.items(constants.INISECT_BEP):
800
        if name not in self.op.beparams:
801
          self.op.beparams[name] = value
802
        # Compatibility for the old "memory" be param
803
        if name == constants.BE_MEMORY:
804
          if constants.BE_MAXMEM not in self.op.beparams:
805
            self.op.beparams[constants.BE_MAXMEM] = value
806
          if constants.BE_MINMEM not in self.op.beparams:
807
            self.op.beparams[constants.BE_MINMEM] = value
808
    else:
809
      # try to read the parameters old style, from the main section
810
      for name in constants.BES_PARAMETERS:
811
        if (name not in self.op.beparams and
812
            einfo.has_option(constants.INISECT_INS, name)):
813
          self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)
814

    
815
    if einfo.has_section(constants.INISECT_OSP):
816
      # use the parameters, without overriding
817
      for name, value in einfo.items(constants.INISECT_OSP):
818
        if name not in self.op.osparams:
819
          self.op.osparams[name] = value
820

    
821
  def _RevertToDefaults(self, cluster):
822
    """Revert the instance parameters to the default values.
823

824
    """
825
    # hvparams
826
    hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
827
    for name in self.op.hvparams.keys():
828
      if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
829
        del self.op.hvparams[name]
830
    # beparams
831
    be_defs = cluster.SimpleFillBE({})
832
    for name in self.op.beparams.keys():
833
      if name in be_defs and be_defs[name] == self.op.beparams[name]:
834
        del self.op.beparams[name]
835
    # nic params
836
    nic_defs = cluster.SimpleFillNIC({})
837
    for nic in self.op.nics:
838
      for name in constants.NICS_PARAMETERS:
839
        if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
840
          del nic[name]
841
    # osparams
842
    os_defs = cluster.SimpleFillOS(self.op.os_type, {})
843
    for name in self.op.osparams.keys():
844
      if name in os_defs and os_defs[name] == self.op.osparams[name]:
845
        del self.op.osparams[name]
846

    
847
  def _CalculateFileStorageDir(self):
848
    """Calculate final instance file storage dir.
849

850
    """
851
    # file storage dir calculation/check
852
    self.instance_file_storage_dir = None
853
    if self.op.disk_template in constants.DTS_FILEBASED:
854
      # build the full file storage dir path
855
      joinargs = []
856

    
857
      if self.op.disk_template == constants.DT_SHARED_FILE:
858
        get_fsd_fn = self.cfg.GetSharedFileStorageDir
859
      else:
860
        get_fsd_fn = self.cfg.GetFileStorageDir
861

    
862
      cfg_storagedir = get_fsd_fn()
863
      if not cfg_storagedir:
864
        raise errors.OpPrereqError("Cluster file storage dir not defined",
865
                                   errors.ECODE_STATE)
866
      joinargs.append(cfg_storagedir)
867

    
868
      if self.op.file_storage_dir is not None:
869
        joinargs.append(self.op.file_storage_dir)
870

    
871
      joinargs.append(self.op.instance_name)
872

    
873
      # pylint: disable=W0142
874
      self.instance_file_storage_dir = utils.PathJoin(*joinargs)
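
  # Editor's sketch (hypothetical paths): for a file-based disk template with a
  # cluster storage dir of "/srv/ganeti/file-storage", an opcode
  # file_storage_dir of "webfarm" and instance name "web1", the resulting
  # directory would be "/srv/ganeti/file-storage/webfarm/web1".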
875

    
876
  def CheckPrereq(self): # pylint: disable=R0914
877
    """Check prerequisites.
878

879
    """
880
    self._CalculateFileStorageDir()
881

    
882
    if self.op.mode == constants.INSTANCE_IMPORT:
883
      export_info = self._ReadExportInfo()
884
      self._ReadExportParams(export_info)
885
      self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
886
    else:
887
      self._old_instance_name = None
888

    
889
    if (not self.cfg.GetVGName() and
890
        self.op.disk_template not in constants.DTS_NOT_LVM):
891
      raise errors.OpPrereqError("Cluster does not support lvm-based"
892
                                 " instances", errors.ECODE_STATE)
893

    
894
    if (self.op.hypervisor is None or
895
        self.op.hypervisor == constants.VALUE_AUTO):
896
      self.op.hypervisor = self.cfg.GetHypervisorType()
897

    
898
    cluster = self.cfg.GetClusterInfo()
899
    enabled_hvs = cluster.enabled_hypervisors
900
    if self.op.hypervisor not in enabled_hvs:
901
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
902
                                 " cluster (%s)" %
903
                                 (self.op.hypervisor, ",".join(enabled_hvs)),
904
                                 errors.ECODE_STATE)
905

    
906
    # Check tag validity
907
    for tag in self.op.tags:
908
      objects.TaggableObject.ValidateTag(tag)
909

    
910
    # check hypervisor parameter syntax (locally)
911
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
912
    filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
913
                                      self.op.hvparams)
914
    hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
915
    hv_type.CheckParameterSyntax(filled_hvp)
916
    self.hv_full = filled_hvp
917
    # check that we don't specify global parameters on an instance
918
    CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
919
                         "instance", "cluster")
920

    
921
    # fill and remember the beparams dict
922
    self.be_full = _ComputeFullBeParams(self.op, cluster)
923

    
924
    # build os parameters
925
    self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)
926

    
927
    # now that hvp/bep are in final format, let's reset to defaults,
928
    # if told to do so
929
    if self.op.identify_defaults:
930
      self._RevertToDefaults(cluster)
931

    
932
    # NIC buildup
933
    self.nics = _ComputeNics(self.op, cluster, self.check_ip, self.cfg,
934
                             self.proc.GetECId())
935

    
936
    # disk checks/pre-build
937
    default_vg = self.cfg.GetVGName()
938
    self.disks = ComputeDisks(self.op, default_vg)
939

    
940
    if self.op.mode == constants.INSTANCE_IMPORT:
941
      disk_images = []
942
      for idx in range(len(self.disks)):
943
        option = "disk%d_dump" % idx
944
        if export_info.has_option(constants.INISECT_INS, option):
945
          # FIXME: are the old os-es, disk sizes, etc. useful?
946
          export_name = export_info.get(constants.INISECT_INS, option)
947
          image = utils.PathJoin(self.op.src_path, export_name)
948
          disk_images.append(image)
949
        else:
950
          disk_images.append(False)
951

    
952
      self.src_images = disk_images
953

    
954
      if self.op.instance_name == self._old_instance_name:
955
        for idx, nic in enumerate(self.nics):
956
          if nic.mac == constants.VALUE_AUTO:
957
            nic_mac_ini = "nic%d_mac" % idx
958
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)
959

    
960
    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT
961

    
962
    # ip ping checks (we use the same ip that was resolved in ExpandNames)
963
    if self.op.ip_check:
964
      if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
965
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
966
                                   (self.check_ip, self.op.instance_name),
967
                                   errors.ECODE_NOTUNIQUE)
968

    
969
    #### mac address generation
970
    # By generating the MAC address here, both the allocator and the hooks get
971
    # the real final mac address rather than the 'auto' or 'generate' value.
972
    # There is a race condition between the generation and the instance object
973
    # creation, which means that we know the mac is valid now, but we're not
974
    # sure it will be when we actually add the instance. If things go bad
975
    # adding the instance will abort because of a duplicate mac, and the
976
    # creation job will fail.
977
    for nic in self.nics:
978
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
979
        nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId())
980

    
981
    #### allocator run
982

    
983
    if self.op.iallocator is not None:
984
      self._RunAllocator()
985

    
986
    # Release all unneeded node locks
987
    keep_locks = filter(None, [self.op.pnode_uuid, self.op.snode_uuid,
988
                               self.op.src_node_uuid])
989
    ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
990
    ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
991
    ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)
992

    
993
    assert (self.owned_locks(locking.LEVEL_NODE) ==
994
            self.owned_locks(locking.LEVEL_NODE_RES)), \
995
      "Node locks differ from node resource locks"
996

    
997
    #### node related checks
998

    
999
    # check primary node
1000
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode_uuid)
1001
    assert self.pnode is not None, \
1002
      "Cannot retrieve locked node %s" % self.op.pnode_uuid
1003
    if pnode.offline:
1004
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
1005
                                 pnode.name, errors.ECODE_STATE)
1006
    if pnode.drained:
1007
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
1008
                                 pnode.name, errors.ECODE_STATE)
1009
    if not pnode.vm_capable:
1010
      raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
1011
                                 " '%s'" % pnode.name, errors.ECODE_STATE)
1012

    
1013
    self.secondaries = []
1014

    
1015
    # Fill in any IPs from IP pools. This must happen here, because we need to
1016
    # know the nic's primary node, as specified by the iallocator
1017
    for idx, nic in enumerate(self.nics):
1018
      net_uuid = nic.network
1019
      if net_uuid is not None:
1020
        nobj = self.cfg.GetNetwork(net_uuid)
1021
        netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.uuid)
1022
        if netparams is None:
1023
          raise errors.OpPrereqError("No netparams found for network"
1024
                                     " %s. Probably not connected to"
1025
                                     " node's %s nodegroup" %
1026
                                     (nobj.name, self.pnode.name),
1027
                                     errors.ECODE_INVAL)
1028
        self.LogInfo("NIC/%d inherits netparams %s" %
1029
                     (idx, netparams.values()))
1030
        nic.nicparams = dict(netparams)
1031
        if nic.ip is not None:
1032
          if nic.ip.lower() == constants.NIC_IP_POOL:
1033
            try:
1034
              nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId())
1035
            except errors.ReservationError:
1036
              raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
1037
                                         " from the address pool" % idx,
1038
                                         errors.ECODE_STATE)
1039
            self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name)
1040
          else:
1041
            try:
1042
              self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId())
1043
            except errors.ReservationError:
1044
              raise errors.OpPrereqError("IP address %s already in use"
1045
                                         " or does not belong to network %s" %
1046
                                         (nic.ip, nobj.name),
1047
                                         errors.ECODE_NOTUNIQUE)
1048

    
1049
      # net is None, ip None or given
1050
      elif self.op.conflicts_check:
1051
        _CheckForConflictingIp(self, nic.ip, self.pnode.uuid)
1052

    
1053
    # mirror node verification
1054
    if self.op.disk_template in constants.DTS_INT_MIRROR:
1055
      if self.op.snode_uuid == pnode.uuid:
1056
        raise errors.OpPrereqError("The secondary node cannot be the"
1057
                                   " primary node", errors.ECODE_INVAL)
1058
      CheckNodeOnline(self, self.op.snode_uuid)
1059
      CheckNodeNotDrained(self, self.op.snode_uuid)
1060
      CheckNodeVmCapable(self, self.op.snode_uuid)
1061
      self.secondaries.append(self.op.snode_uuid)
1062

    
1063
      snode = self.cfg.GetNodeInfo(self.op.snode_uuid)
1064
      if pnode.group != snode.group:
1065
        self.LogWarning("The primary and secondary nodes are in two"
1066
                        " different node groups; the disk parameters"
1067
                        " from the first disk's node group will be"
1068
                        " used")
1069

    
1070
    nodes = [pnode]
1071
    if self.op.disk_template in constants.DTS_INT_MIRROR:
1072
      nodes.append(snode)
1073
    has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
1074
    excl_stor = compat.any(map(has_es, nodes))
1075
    if excl_stor and not self.op.disk_template in constants.DTS_EXCL_STORAGE:
1076
      raise errors.OpPrereqError("Disk template %s not supported with"
1077
                                 " exclusive storage" % self.op.disk_template,
1078
                                 errors.ECODE_STATE)
1079
    for disk in self.disks:
1080
      CheckSpindlesExclusiveStorage(disk, excl_stor, True)
1081

    
1082
    node_uuids = [pnode.uuid] + self.secondaries
1083

    
1084
    if not self.adopt_disks:
1085
      if self.op.disk_template == constants.DT_RBD:
1086
        # _CheckRADOSFreeSpace() is just a placeholder.
1087
        # Any function that checks prerequisites can be placed here.
1088
        # Check if there is enough space on the RADOS cluster.
1089
        CheckRADOSFreeSpace()
1090
      elif self.op.disk_template == constants.DT_EXT:
1091
        # FIXME: Function that checks prereqs if needed
1092
        pass
1093
      elif self.op.disk_template in constants.DTS_LVM:
1094
        # Check lv size requirements, if not adopting
1095
        req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
1096
        CheckNodesFreeDiskPerVG(self, node_uuids, req_sizes)
1097
      else:
1098
        # FIXME: add checks for other, non-adopting, non-lvm disk templates
1099
        pass
1100

    
1101
    elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
1102
      all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
1103
                                disk[constants.IDISK_ADOPT])
1104
                     for disk in self.disks])
1105
      if len(all_lvs) != len(self.disks):
1106
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
1107
                                   errors.ECODE_INVAL)
1108
      for lv_name in all_lvs:
1109
        try:
1110
          # FIXME: lv_name here is "vg/lv"; need to ensure that other calls
1111
          # to ReserveLV use the same syntax
1112
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
1113
        except errors.ReservationError:
1114
          raise errors.OpPrereqError("LV named %s used by another instance" %
1115
                                     lv_name, errors.ECODE_NOTUNIQUE)
1116

    
1117
      vg_names = self.rpc.call_vg_list([pnode.uuid])[pnode.uuid]
1118
      vg_names.Raise("Cannot get VG information from node %s" % pnode.name)
1119

    
1120
      node_lvs = self.rpc.call_lv_list([pnode.uuid],
1121
                                       vg_names.payload.keys())[pnode.uuid]
1122
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
1123
      node_lvs = node_lvs.payload
1124

    
1125
      delta = all_lvs.difference(node_lvs.keys())
1126
      if delta:
1127
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
1128
                                   utils.CommaJoin(delta),
1129
                                   errors.ECODE_INVAL)
1130
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
1131
      if online_lvs:
1132
        raise errors.OpPrereqError("Online logical volumes found, cannot"
1133
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
1134
                                   errors.ECODE_STATE)
1135
      # update the size of disk based on what is found
1136
      for dsk in self.disks:
1137
        dsk[constants.IDISK_SIZE] = \
1138
          int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
1139
                                        dsk[constants.IDISK_ADOPT])][0]))
1140

    
1141
    elif self.op.disk_template == constants.DT_BLOCK:
1142
      # Normalize and de-duplicate device paths
1143
      all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
1144
                       for disk in self.disks])
1145
      if len(all_disks) != len(self.disks):
1146
        raise errors.OpPrereqError("Duplicate disk names given for adoption",
1147
                                   errors.ECODE_INVAL)
1148
      baddisks = [d for d in all_disks
1149
                  if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
1150
      if baddisks:
1151
        raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
1152
                                   " cannot be adopted" %
1153
                                   (utils.CommaJoin(baddisks),
1154
                                    constants.ADOPTABLE_BLOCKDEV_ROOT),
1155
                                   errors.ECODE_INVAL)
1156

    
1157
      node_disks = self.rpc.call_bdev_sizes([pnode.uuid],
1158
                                            list(all_disks))[pnode.uuid]
1159
      node_disks.Raise("Cannot get block device information from node %s" %
1160
                       pnode.name)
1161
      node_disks = node_disks.payload
1162
      delta = all_disks.difference(node_disks.keys())
1163
      if delta:
1164
        raise errors.OpPrereqError("Missing block device(s): %s" %
1165
                                   utils.CommaJoin(delta),
1166
                                   errors.ECODE_INVAL)
1167
      for dsk in self.disks:
1168
        dsk[constants.IDISK_SIZE] = \
1169
          int(float(node_disks[dsk[constants.IDISK_ADOPT]]))
1170

    
1171
    # Check disk access param to be compatible with specified hypervisor
1172
    node_info = self.cfg.GetNodeInfo(self.op.pnode_uuid)
1173
    node_group = self.cfg.GetNodeGroup(node_info.group)
1174
    disk_params = self.cfg.GetGroupDiskParams(node_group)
1175
    access_type = disk_params[self.op.disk_template].get(
1176
      constants.RBD_ACCESS, constants.DISK_KERNELSPACE
1177
    )
1178

    
1179
    if not IsValidDiskAccessModeCombination(self.op.hypervisor,
1180
                                            self.op.disk_template,
1181
                                            access_type):
1182
      raise errors.OpPrereqError("Selected hypervisor (%s) cannot be"
1183
                                 " used with %s disk access param" %
1184
                                 (self.op.hypervisor, access_type),
1185
                                  errors.ECODE_STATE)
1186

    
1187
    # Verify instance specs
1188
    spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
1189
    ispec = {
1190
      constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
1191
      constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
1192
      constants.ISPEC_DISK_COUNT: len(self.disks),
1193
      constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE]
1194
                                  for disk in self.disks],
1195
      constants.ISPEC_NIC_COUNT: len(self.nics),
1196
      constants.ISPEC_SPINDLE_USE: spindle_use,
1197
      }
1198

    
1199
    group_info = self.cfg.GetNodeGroup(pnode.group)
1200
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
1201
    res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
1202
                                               self.op.disk_template)
1203
    if not self.op.ignore_ipolicy and res:
1204
      msg = ("Instance allocation to group %s (%s) violates policy: %s" %
1205
             (pnode.group, group_info.name, utils.CommaJoin(res)))
1206
      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
1207

    
1208
    CheckHVParams(self, node_uuids, self.op.hypervisor, self.op.hvparams)
1209

    
1210
    CheckNodeHasOS(self, pnode.uuid, self.op.os_type, self.op.force_variant)
1211
    # check OS parameters (remotely)
1212
    CheckOSParams(self, True, node_uuids, self.op.os_type, self.os_full)
1213

    
1214
    CheckNicsBridgesExist(self, self.nics, self.pnode.uuid)
1215

    
1216
    #TODO: _CheckExtParams (remotely)
1217
    # Check parameters for extstorage
1218

    
1219
    # memory check on primary node
1220
    #TODO(dynmem): use MINMEM for checking
1221
    if self.op.start:
1222
      hvfull = objects.FillDict(cluster.hvparams.get(self.op.hypervisor, {}),
1223
                                self.op.hvparams)
1224
      CheckNodeFreeMemory(self, self.pnode.uuid,
1225
                          "creating instance %s" % self.op.instance_name,
1226
                          self.be_full[constants.BE_MAXMEM],
1227
                          self.op.hypervisor, hvfull)
1228

    
1229
    self.dry_run_result = list(node_uuids)
1230

    
1231
  def Exec(self, feedback_fn):
1232
    """Create and add the instance to the cluster.
1233

1234
    """
1235
    assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
1236
                self.owned_locks(locking.LEVEL_NODE)), \
1237
      "Node locks differ from node resource locks"
1238
    assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)
1239

    
1240
    ht_kind = self.op.hypervisor
1241
    if ht_kind in constants.HTS_REQ_PORT:
1242
      network_port = self.cfg.AllocatePort()
1243
    else:
1244
      network_port = None
1245

    
1246
    instance_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
1247

    
1248
    # This is ugly, but we have a chicken-and-egg problem here
1249
    # We can only take the group disk parameters, as the instance
1250
    # has no disks yet (we are generating them right here).
1251
    nodegroup = self.cfg.GetNodeGroup(self.pnode.group)
1252
    disks = GenerateDiskTemplate(self,
1253
                                 self.op.disk_template,
1254
                                 instance_uuid, self.pnode.uuid,
1255
                                 self.secondaries,
1256
                                 self.disks,
1257
                                 self.instance_file_storage_dir,
1258
                                 self.op.file_driver,
1259
                                 0,
1260
                                 feedback_fn,
1261
                                 self.cfg.GetGroupDiskParams(nodegroup))
1262

    
1263
    iobj = objects.Instance(name=self.op.instance_name,
1264
                            uuid=instance_uuid,
1265
                            os=self.op.os_type,
1266
                            primary_node=self.pnode.uuid,
1267
                            nics=self.nics, disks=disks,
1268
                            disk_template=self.op.disk_template,
1269
                            disks_active=False,
1270
                            admin_state=constants.ADMINST_DOWN,
1271
                            network_port=network_port,
1272
                            beparams=self.op.beparams,
1273
                            hvparams=self.op.hvparams,
1274
                            hypervisor=self.op.hypervisor,
1275
                            osparams=self.op.osparams,
1276
                            )
1277

    
1278
    if self.op.tags:
1279
      for tag in self.op.tags:
1280
        iobj.AddTag(tag)
1281

    
1282
    if self.adopt_disks:
1283
      if self.op.disk_template == constants.DT_PLAIN:
1284
        # rename LVs to the newly-generated names; we need to construct
1285
        # 'fake' LV disks with the old data, plus the new unique_id
1286
        tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
1287
        rename_to = []
1288
        for t_dsk, a_dsk in zip(tmp_disks, self.disks):
1289
          rename_to.append(t_dsk.logical_id)
1290
          t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
1291
        result = self.rpc.call_blockdev_rename(self.pnode.uuid,
1292
                                               zip(tmp_disks, rename_to))
1293
        result.Raise("Failed to rename adoped LVs")
1294
    else:
1295
      feedback_fn("* creating instance disks...")
1296
      try:
1297
        CreateDisks(self, iobj)
1298
      except errors.OpExecError:
1299
        self.LogWarning("Device creation failed")
1300
        self.cfg.ReleaseDRBDMinors(self.op.instance_name)
1301
        raise
1302

    
1303
    feedback_fn("adding instance %s to cluster config" % self.op.instance_name)
1304

    
1305
    self.cfg.AddInstance(iobj, self.proc.GetECId())
1306

    
1307
    # Declare that we don't want to remove the instance lock anymore, as we've
1308
    # added the instance to the config
1309
    del self.remove_locks[locking.LEVEL_INSTANCE]
1310

    
1311
    if self.op.mode == constants.INSTANCE_IMPORT:
1312
      # Release unused nodes
1313
      ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node_uuid])
1314
    else:
1315
      # Release all nodes
1316
      ReleaseLocks(self, locking.LEVEL_NODE)
1317

    
1318
    disk_abort = False
1319
    if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
1320
      feedback_fn("* wiping instance disks...")
1321
      try:
1322
        WipeDisks(self, iobj)
1323
      except errors.OpExecError, err:
1324
        logging.exception("Wiping disks failed")
1325
        self.LogWarning("Wiping instance disks failed (%s)", err)
1326
        disk_abort = True
1327

    
1328
    if disk_abort:
1329
      # Something is already wrong with the disks, don't do anything else
1330
      pass
1331
    elif self.op.wait_for_sync:
1332
      disk_abort = not WaitForSync(self, iobj)
1333
    elif iobj.disk_template in constants.DTS_INT_MIRROR:
1334
      # make sure the disks are not degraded (still sync-ing is ok)
1335
      feedback_fn("* checking mirrors status")
1336
      disk_abort = not WaitForSync(self, iobj, oneshot=True)
1337
    else:
1338
      disk_abort = False
1339

    
1340
    if disk_abort:
1341
      RemoveDisks(self, iobj)
1342
      self.cfg.RemoveInstance(iobj.uuid)
1343
      # Make sure the instance lock gets removed
1344
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
1345
      raise errors.OpExecError("There are some degraded disks for"
1346
                               " this instance")
1347

    
1348
    # instance disks are now active
1349
    iobj.disks_active = True
1350

    
1351
    # Release all node resource locks
1352
    ReleaseLocks(self, locking.LEVEL_NODE_RES)
1353

    
1354
    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
1355
      if self.op.mode == constants.INSTANCE_CREATE:
1356
        if not self.op.no_install:
1357
          pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
1358
                        not self.op.wait_for_sync)
1359
          if pause_sync:
1360
            feedback_fn("* pausing disk sync to install instance OS")
1361
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
1362
                                                              (iobj.disks,
1363
                                                               iobj), True)
1364
            for idx, success in enumerate(result.payload):
1365
              if not success:
1366
                logging.warn("pause-sync of instance %s for disk %d failed",
1367
                             self.op.instance_name, idx)
1368

    
1369
          feedback_fn("* running the instance OS create scripts...")
1370
          # FIXME: pass debug option from opcode to backend
1371
          os_add_result = \
1372
            self.rpc.call_instance_os_add(self.pnode.uuid, (iobj, None), False,
1373
                                          self.op.debug_level)
1374
          if pause_sync:
1375
            feedback_fn("* resuming disk sync")
1376
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
1377
                                                              (iobj.disks,
1378
                                                               iobj), False)
1379
            for idx, success in enumerate(result.payload):
1380
              if not success:
1381
                logging.warn("resume-sync of instance %s for disk %d failed",
1382
                             self.op.instance_name, idx)
1383

    
1384
          os_add_result.Raise("Could not add os for instance %s"
1385
                              " on node %s" % (self.op.instance_name,
1386
                                               self.pnode.name))
1387

    
1388
      else:
1389
        if self.op.mode == constants.INSTANCE_IMPORT:
1390
          feedback_fn("* running the instance OS import scripts...")
1391

    
1392
          transfers = []
1393

    
1394
          for idx, image in enumerate(self.src_images):
1395
            if not image:
1396
              continue
1397

    
1398
            # FIXME: pass debug option from opcode to backend
1399
            dt = masterd.instance.DiskTransfer("disk/%s" % idx,
1400
                                               constants.IEIO_FILE, (image, ),
1401
                                               constants.IEIO_SCRIPT,
1402
                                               ((iobj.disks[idx], iobj), idx),
1403
                                               None)
1404
            transfers.append(dt)
1405

    
1406
          import_result = \
1407
            masterd.instance.TransferInstanceData(self, feedback_fn,
1408
                                                  self.op.src_node_uuid,
1409
                                                  self.pnode.uuid,
1410
                                                  self.pnode.secondary_ip,
1411
                                                  iobj, transfers)
1412
          if not compat.all(import_result):
1413
            self.LogWarning("Some disks for instance %s on node %s were not"
1414
                            " imported successfully" % (self.op.instance_name,
1415
                                                        self.pnode.name))
1416

    
1417
          rename_from = self._old_instance_name
1418

    
1419
        elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
1420
          feedback_fn("* preparing remote import...")
1421
          # The source cluster will stop the instance before attempting to make
1422
          # a connection. In some cases stopping an instance can take a long
1423
          # time, hence the shutdown timeout is added to the connection
1424
          # timeout.
1425
          connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
1426
                             self.op.source_shutdown_timeout)
1427
          timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
1428

    
1429
          assert iobj.primary_node == self.pnode.uuid
1430
          disk_results = \
1431
            masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
1432
                                          self.source_x509_ca,
1433
                                          self._cds, timeouts)
1434
          if not compat.all(disk_results):
1435
            # TODO: Should the instance still be started, even if some disks
1436
            # failed to import (valid for local imports, too)?
1437
            self.LogWarning("Some disks for instance %s on node %s were not"
1438
                            " imported successfully" % (self.op.instance_name,
1439
                                                        self.pnode.name))
1440

    
1441
          rename_from = self.source_instance_name
1442

    
1443
        else:
1444
          # also checked in the prereq part
1445
          raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
1446
                                       % self.op.mode)
1447

    
1448
        # Run rename script on newly imported instance
1449
        assert iobj.name == self.op.instance_name
1450
        feedback_fn("Running rename script for %s" % self.op.instance_name)
1451
        result = self.rpc.call_instance_run_rename(self.pnode.uuid, iobj,
1452
                                                   rename_from,
1453
                                                   self.op.debug_level)
1454
        result.Warn("Failed to run rename script for %s on node %s" %
1455
                    (self.op.instance_name, self.pnode.name), self.LogWarning)
1456

    
1457
    assert not self.owned_locks(locking.LEVEL_NODE_RES)
1458

    
1459
    if self.op.start:
1460
      iobj.admin_state = constants.ADMINST_UP
1461
      self.cfg.Update(iobj, feedback_fn)
1462
      logging.info("Starting instance %s on node %s", self.op.instance_name,
1463
                   self.pnode.name)
1464
      feedback_fn("* starting instance...")
1465
      result = self.rpc.call_instance_start(self.pnode.uuid, (iobj, None, None),
1466
                                            False, self.op.reason)
1467
      result.Raise("Could not start instance")
1468

    
1469
    return list(iobj.all_nodes)
1470

    
1471

    
1472
class LUInstanceRename(LogicalUnit):
1473
  """Rename an instance.
1474

1475
  """
1476
  HPATH = "instance-rename"
1477
  HTYPE = constants.HTYPE_INSTANCE
1478

    
1479
  def CheckArguments(self):
1480
    """Check arguments.
1481

1482
    """
1483
    if self.op.ip_check and not self.op.name_check:
1484
      # TODO: make the ip check more flexible and not depend on the name check
1485
      raise errors.OpPrereqError("IP address check requires a name check",
1486
                                 errors.ECODE_INVAL)
1487

    
1488
  def BuildHooksEnv(self):
1489
    """Build hooks env.
1490

1491
    This runs on master, primary and secondary nodes of the instance.
1492

1493
    """
1494
    env = BuildInstanceHookEnvByObject(self, self.instance)
1495
    env["INSTANCE_NEW_NAME"] = self.op.new_name
1496
    return env
1497

    
1498
  def BuildHooksNodes(self):
1499
    """Build hooks nodes.
1500

1501
    """
1502
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
1503
    return (nl, nl)
1504

    
1505
  def CheckPrereq(self):
1506
    """Check prerequisites.
1507

1508
    This checks that the instance is in the cluster and is not running.
1509

1510
    """
1511
    (self.op.instance_uuid, self.op.instance_name) = \
1512
      ExpandInstanceUuidAndName(self.cfg, self.op.instance_uuid,
1513
                                self.op.instance_name)
1514
    instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1515
    assert instance is not None
1516

    
1517
    # It should actually not happen that an instance is running with a disabled
1518
    # disk template, but in case it does, the renaming of file-based instances
1519
    # will fail horribly. Thus, we test it before.
1520
    if (instance.disk_template in constants.DTS_FILEBASED and
1521
        self.op.new_name != instance.name):
1522
      CheckDiskTemplateEnabled(self.cfg.GetClusterInfo(),
1523
                               instance.disk_template)
1524

    
1525
    CheckNodeOnline(self, instance.primary_node)
1526
    CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
1527
                       msg="cannot rename")
1528
    self.instance = instance
1529

    
1530
    new_name = self.op.new_name
1531
    if self.op.name_check:
1532
      hostname = _CheckHostnameSane(self, new_name)
1533
      new_name = self.op.new_name = hostname.name
1534
      if (self.op.ip_check and
1535
          netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
1536
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
1537
                                   (hostname.ip, new_name),
1538
                                   errors.ECODE_NOTUNIQUE)
1539

    
1540
    instance_names = [inst.name for
1541
                      inst in self.cfg.GetAllInstancesInfo().values()]
1542
    if new_name in instance_names and new_name != instance.name:
1543
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
1544
                                 new_name, errors.ECODE_EXISTS)
1545

    
1546
  def Exec(self, feedback_fn):
1547
    """Rename the instance.
1548

1549
    """
1550
    old_name = self.instance.name
1551

    
1552
    rename_file_storage = False
1553
    if (self.instance.disk_template in constants.DTS_FILEBASED and
1554
        self.op.new_name != self.instance.name):
1555
      old_file_storage_dir = os.path.dirname(
1556
                               self.instance.disks[0].logical_id[1])
1557
      rename_file_storage = True
1558

    
1559
    self.cfg.RenameInstance(self.instance.uuid, self.op.new_name)
1560
    # Change the instance lock. This is definitely safe while we hold the BGL.
1561
    # Otherwise the new lock would have to be added in acquired mode.
1562
    assert self.REQ_BGL
1563
    assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER)
1564
    self.glm.remove(locking.LEVEL_INSTANCE, old_name)
1565
    self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
1566

    
1567
    # re-read the instance from the configuration after rename
1568
    renamed_inst = self.cfg.GetInstanceInfo(self.instance.uuid)
1569

    
1570
    if rename_file_storage:
1571
      new_file_storage_dir = os.path.dirname(
1572
                               renamed_inst.disks[0].logical_id[1])
1573
      result = self.rpc.call_file_storage_dir_rename(renamed_inst.primary_node,
1574
                                                     old_file_storage_dir,
1575
                                                     new_file_storage_dir)
1576
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
1577
                   " (but the instance has been renamed in Ganeti)" %
1578
                   (self.cfg.GetNodeName(renamed_inst.primary_node),
1579
                    old_file_storage_dir, new_file_storage_dir))
1580

    
1581
    StartInstanceDisks(self, renamed_inst, None)
1582
    # update info on disks
1583
    info = GetInstanceInfoText(renamed_inst)
1584
    for (idx, disk) in enumerate(renamed_inst.disks):
1585
      for node_uuid in renamed_inst.all_nodes:
1586
        result = self.rpc.call_blockdev_setinfo(node_uuid,
1587
                                                (disk, renamed_inst), info)
1588
        result.Warn("Error setting info on node %s for disk %s" %
1589
                    (self.cfg.GetNodeName(node_uuid), idx), self.LogWarning)
1590
    try:
1591
      result = self.rpc.call_instance_run_rename(renamed_inst.primary_node,
1592
                                                 renamed_inst, old_name,
1593
                                                 self.op.debug_level)
1594
      result.Warn("Could not run OS rename script for instance %s on node %s"
1595
                  " (but the instance has been renamed in Ganeti)" %
1596
                  (renamed_inst.name,
1597
                   self.cfg.GetNodeName(renamed_inst.primary_node)),
1598
                  self.LogWarning)
1599
    finally:
1600
      ShutdownInstanceDisks(self, renamed_inst)
1601

    
1602
    return renamed_inst.name
1603

    
1604

    
1605
class LUInstanceRemove(LogicalUnit):
1606
  """Remove an instance.
1607

1608
  """
1609
  HPATH = "instance-remove"
1610
  HTYPE = constants.HTYPE_INSTANCE
1611
  REQ_BGL = False
1612

    
1613
  def ExpandNames(self):
1614
    self._ExpandAndLockInstance()
1615
    self.needed_locks[locking.LEVEL_NODE] = []
1616
    self.needed_locks[locking.LEVEL_NODE_RES] = []
1617
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
1618

    
1619
  def DeclareLocks(self, level):
1620
    if level == locking.LEVEL_NODE:
1621
      self._LockInstancesNodes()
1622
    elif level == locking.LEVEL_NODE_RES:
1623
      # Copy node locks
1624
      self.needed_locks[locking.LEVEL_NODE_RES] = \
1625
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1626

    
1627
  def BuildHooksEnv(self):
1628
    """Build hooks env.
1629

1630
    This runs on master, primary and secondary nodes of the instance.
1631

1632
    """
1633
    env = BuildInstanceHookEnvByObject(self, self.instance)
1634
    env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
1635
    return env
1636

    
1637
  def BuildHooksNodes(self):
1638
    """Build hooks nodes.
1639

1640
    """
1641
    nl = [self.cfg.GetMasterNode()]
1642
    nl_post = list(self.instance.all_nodes) + nl
1643
    return (nl, nl_post)
1644

    
1645
  def CheckPrereq(self):
1646
    """Check prerequisites.
1647

1648
    This checks that the instance is in the cluster.
1649

1650
    """
1651
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1652
    assert self.instance is not None, \
1653
      "Cannot retrieve locked instance %s" % self.op.instance_name
1654

    
1655
  def Exec(self, feedback_fn):
1656
    """Remove the instance.
1657

1658
    """
1659
    logging.info("Shutting down instance %s on node %s", self.instance.name,
1660
                 self.cfg.GetNodeName(self.instance.primary_node))
1661

    
1662
    result = self.rpc.call_instance_shutdown(self.instance.primary_node,
1663
                                             self.instance,
1664
                                             self.op.shutdown_timeout,
1665
                                             self.op.reason)
1666
    if self.op.ignore_failures:
1667
      result.Warn("Warning: can't shutdown instance", feedback_fn)
1668
    else:
1669
      result.Raise("Could not shutdown instance %s on node %s" %
1670
                   (self.instance.name,
1671
                    self.cfg.GetNodeName(self.instance.primary_node)))
1672

    
1673
    assert (self.owned_locks(locking.LEVEL_NODE) ==
1674
            self.owned_locks(locking.LEVEL_NODE_RES))
1675
    assert not (set(self.instance.all_nodes) -
1676
                self.owned_locks(locking.LEVEL_NODE)), \
1677
      "Not owning correct locks"
1678

    
1679
    RemoveInstance(self, feedback_fn, self.instance, self.op.ignore_failures)
1680

    
1681

    
1682
class LUInstanceMove(LogicalUnit):
1683
  """Move an instance by data-copying.
1684

1685
  """
1686
  HPATH = "instance-move"
1687
  HTYPE = constants.HTYPE_INSTANCE
1688
  REQ_BGL = False
1689

    
1690
  def ExpandNames(self):
1691
    self._ExpandAndLockInstance()
1692
    (self.op.target_node_uuid, self.op.target_node) = \
1693
      ExpandNodeUuidAndName(self.cfg, self.op.target_node_uuid,
1694
                            self.op.target_node)
1695
    self.needed_locks[locking.LEVEL_NODE] = [self.op.target_node_uuid]
1696
    self.needed_locks[locking.LEVEL_NODE_RES] = []
1697
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
1698

    
1699
  def DeclareLocks(self, level):
1700
    if level == locking.LEVEL_NODE:
1701
      self._LockInstancesNodes(primary_only=True)
1702
    elif level == locking.LEVEL_NODE_RES:
1703
      # Copy node locks
1704
      self.needed_locks[locking.LEVEL_NODE_RES] = \
1705
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1706

    
1707
  def BuildHooksEnv(self):
1708
    """Build hooks env.
1709

1710
    This runs on master, primary and secondary nodes of the instance.
1711

1712
    """
1713
    env = {
1714
      "TARGET_NODE": self.op.target_node,
1715
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
1716
      }
1717
    env.update(BuildInstanceHookEnvByObject(self, self.instance))
1718
    return env
1719

    
1720
  def BuildHooksNodes(self):
1721
    """Build hooks nodes.
1722

1723
    """
1724
    nl = [
1725
      self.cfg.GetMasterNode(),
1726
      self.instance.primary_node,
1727
      self.op.target_node_uuid,
1728
      ]
1729
    return (nl, nl)
1730

    
1731
  def CheckPrereq(self):
1732
    """Check prerequisites.
1733

1734
    This checks that the instance is in the cluster.
1735

1736
    """
1737
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1738
    assert self.instance is not None, \
1739
      "Cannot retrieve locked instance %s" % self.op.instance_name
1740

    
1741
    if self.instance.disk_template not in constants.DTS_COPYABLE:
1742
      raise errors.OpPrereqError("Disk template %s not suitable for copying" %
1743
                                 self.instance.disk_template,
1744
                                 errors.ECODE_STATE)
1745

    
1746
    target_node = self.cfg.GetNodeInfo(self.op.target_node_uuid)
1747
    assert target_node is not None, \
1748
      "Cannot retrieve locked node %s" % self.op.target_node
1749

    
1750
    self.target_node_uuid = target_node.uuid
1751
    if target_node.uuid == self.instance.primary_node:
1752
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
1753
                                 (self.instance.name, target_node.name),
1754
                                 errors.ECODE_STATE)
1755

    
1756
    bep = self.cfg.GetClusterInfo().FillBE(self.instance)
1757

    
1758
    for idx, dsk in enumerate(self.instance.disks):
1759
      if dsk.dev_type not in (constants.DT_PLAIN, constants.DT_FILE,
1760
                              constants.DT_SHARED_FILE):
1761
        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
1762
                                   " cannot copy" % idx, errors.ECODE_STATE)
1763

    
1764
    CheckNodeOnline(self, target_node.uuid)
1765
    CheckNodeNotDrained(self, target_node.uuid)
1766
    CheckNodeVmCapable(self, target_node.uuid)
1767
    cluster = self.cfg.GetClusterInfo()
1768
    group_info = self.cfg.GetNodeGroup(target_node.group)
1769
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
1770
    CheckTargetNodeIPolicy(self, ipolicy, self.instance, target_node, self.cfg,
1771
                           ignore=self.op.ignore_ipolicy)
1772

    
1773
    if self.instance.admin_state == constants.ADMINST_UP:
1774
      # check memory requirements on the secondary node
1775
      CheckNodeFreeMemory(
1776
          self, target_node.uuid, "failing over instance %s" %
1777
          self.instance.name, bep[constants.BE_MAXMEM],
1778
          self.instance.hypervisor,
1779
          self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])
1780
    else:
1781
      self.LogInfo("Not checking memory on the secondary node as"
1782
                   " instance will not be started")
1783

    
1784
    # check bridge existence
1785
    CheckInstanceBridgesExist(self, self.instance, node_uuid=target_node.uuid)
1786

    
1787
  def Exec(self, feedback_fn):
1788
    """Move an instance.
1789

1790
    The move is done by shutting it down on its present node, copying
1791
    the data over (slow) and starting it on the new node.
1792

1793
    """
1794
    source_node = self.cfg.GetNodeInfo(self.instance.primary_node)
1795
    target_node = self.cfg.GetNodeInfo(self.target_node_uuid)
1796

    
1797
    self.LogInfo("Shutting down instance %s on source node %s",
1798
                 self.instance.name, source_node.name)
1799

    
1800
    assert (self.owned_locks(locking.LEVEL_NODE) ==
1801
            self.owned_locks(locking.LEVEL_NODE_RES))
1802

    
1803
    result = self.rpc.call_instance_shutdown(source_node.uuid, self.instance,
1804
                                             self.op.shutdown_timeout,
1805
                                             self.op.reason)
1806
    if self.op.ignore_consistency:
1807
      result.Warn("Could not shutdown instance %s on node %s. Proceeding"
1808
                  " anyway. Please make sure node %s is down. Error details" %
1809
                  (self.instance.name, source_node.name, source_node.name),
1810
                  self.LogWarning)
1811
    else:
1812
      result.Raise("Could not shutdown instance %s on node %s" %
1813
                   (self.instance.name, source_node.name))
1814

    
1815
    # create the target disks
1816
    try:
1817
      CreateDisks(self, self.instance, target_node_uuid=target_node.uuid)
1818
    except errors.OpExecError:
1819
      self.LogWarning("Device creation failed")
1820
      self.cfg.ReleaseDRBDMinors(self.instance.uuid)
1821
      raise
1822

    
1823
    cluster_name = self.cfg.GetClusterInfo().cluster_name
1824

    
1825
    errs = []
1826
    # activate, get path, copy the data over
1827
    for idx, disk in enumerate(self.instance.disks):
1828
      self.LogInfo("Copying data for disk %d", idx)
1829
      result = self.rpc.call_blockdev_assemble(
1830
                 target_node.uuid, (disk, self.instance), self.instance.name,
1831
                 True, idx)
1832
      if result.fail_msg:
1833
        self.LogWarning("Can't assemble newly created disk %d: %s",
1834
                        idx, result.fail_msg)
1835
        errs.append(result.fail_msg)
1836
        break
1837
      dev_path, _ = result.payload
1838
      result = self.rpc.call_blockdev_export(source_node.uuid, (disk,
1839
                                                                self.instance),
1840
                                             target_node.secondary_ip,
1841
                                             dev_path, cluster_name)
1842
      if result.fail_msg:
1843
        self.LogWarning("Can't copy data over for disk %d: %s",
1844
                        idx, result.fail_msg)
1845
        errs.append(result.fail_msg)
1846
        break
1847

    
1848
    if errs:
1849
      self.LogWarning("Some disks failed to copy, aborting")
1850
      try:
1851
        RemoveDisks(self, self.instance, target_node_uuid=target_node.uuid)
1852
      finally:
1853
        self.cfg.ReleaseDRBDMinors(self.instance.uuid)
1854
        raise errors.OpExecError("Errors during disk copy: %s" %
1855
                                 (",".join(errs),))
1856

    
1857
    self.instance.primary_node = target_node.uuid
1858
    self.cfg.Update(self.instance, feedback_fn)
1859

    
1860
    self.LogInfo("Removing the disks on the original node")
1861
    RemoveDisks(self, self.instance, target_node_uuid=source_node.uuid)
1862

    
1863
    # Only start the instance if it's marked as up
1864
    if self.instance.admin_state == constants.ADMINST_UP:
1865
      self.LogInfo("Starting instance %s on node %s",
1866
                   self.instance.name, target_node.name)
1867

    
1868
      disks_ok, _ = AssembleInstanceDisks(self, self.instance,
1869
                                          ignore_secondaries=True)
1870
      if not disks_ok:
1871
        ShutdownInstanceDisks(self, self.instance)
1872
        raise errors.OpExecError("Can't activate the instance's disks")
1873

    
1874
      result = self.rpc.call_instance_start(target_node.uuid,
1875
                                            (self.instance, None, None), False,
1876
                                            self.op.reason)
1877
      msg = result.fail_msg
1878
      if msg:
1879
        ShutdownInstanceDisks(self, self.instance)
1880
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
1881
                                 (self.instance.name, target_node.name, msg))
1882

    
1883

    
1884
class LUInstanceMultiAlloc(NoHooksLU):
1885
  """Allocates multiple instances at the same time.
1886

1887
  """
1888
  REQ_BGL = False
1889

    
1890
  def CheckArguments(self):
1891
    """Check arguments.
1892

1893
    """
1894
    nodes = []
1895
    for inst in self.op.instances:
1896
      if inst.iallocator is not None:
1897
        raise errors.OpPrereqError("iallocator are not allowed to be set on"
1898
                                   " instance objects", errors.ECODE_INVAL)
1899
      nodes.append(bool(inst.pnode))
1900
      if inst.disk_template in constants.DTS_INT_MIRROR:
1901
        nodes.append(bool(inst.snode))
1902

    
1903
    has_nodes = compat.any(nodes)
1904
    if compat.all(nodes) ^ has_nodes:
1905
      raise errors.OpPrereqError("There are instance objects providing"
1906
                                 " pnode/snode while others do not",
1907
                                 errors.ECODE_INVAL)
1908

    
1909
    if not has_nodes and self.op.iallocator is None:
1910
      default_iallocator = self.cfg.GetDefaultIAllocator()
1911
      if default_iallocator:
1912
        self.op.iallocator = default_iallocator
1913
      else:
1914
        raise errors.OpPrereqError("No iallocator or nodes on the instances"
1915
                                   " given and no cluster-wide default"
1916
                                   " iallocator found; please specify either"
1917
                                   " an iallocator or nodes on the instances"
1918
                                   " or set a cluster-wide default iallocator",
1919
                                   errors.ECODE_INVAL)
1920

    
1921
    _CheckOpportunisticLocking(self.op)
1922

    
1923
    dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
1924
    if dups:
1925
      raise errors.OpPrereqError("There are duplicate instance names: %s" %
1926
                                 utils.CommaJoin(dups), errors.ECODE_INVAL)
1927

    
1928
  def ExpandNames(self):
1929
    """Calculate the locks.
1930

1931
    """
1932
    self.share_locks = ShareAll()
1933
    self.needed_locks = {
1934
      # iallocator will select nodes and even if no iallocator is used,
1935
      # collisions with LUInstanceCreate should be avoided
1936
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
1937
      }
1938

    
1939
    if self.op.iallocator:
1940
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
1941
      self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET
1942

    
1943
      if self.op.opportunistic_locking:
1944
        self.opportunistic_locks[locking.LEVEL_NODE] = True
1945
        self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
1946
    else:
1947
      nodeslist = []
1948
      for inst in self.op.instances:
1949
        (inst.pnode_uuid, inst.pnode) = \
1950
          ExpandNodeUuidAndName(self.cfg, inst.pnode_uuid, inst.pnode)
1951
        nodeslist.append(inst.pnode_uuid)
1952
        if inst.snode is not None:
1953
          (inst.snode_uuid, inst.snode) = \
1954
            ExpandNodeUuidAndName(self.cfg, inst.snode_uuid, inst.snode)
1955
          nodeslist.append(inst.snode_uuid)
1956

    
1957
      self.needed_locks[locking.LEVEL_NODE] = nodeslist
1958
      # Lock resources of instance's primary and secondary nodes (copy to
1959
      # prevent accidental modification)
1960
      self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)
1961

    
1962
  def CheckPrereq(self):
1963
    """Check prerequisite.
1964

1965
    """
1966
    if self.op.iallocator:
1967
      cluster = self.cfg.GetClusterInfo()
1968
      default_vg = self.cfg.GetVGName()
1969
      ec_id = self.proc.GetECId()
1970

    
1971
      if self.op.opportunistic_locking:
1972
        # Only consider nodes for which a lock is held
1973
        node_whitelist = self.cfg.GetNodeNames(
1974
                           list(self.owned_locks(locking.LEVEL_NODE)))
1975
      else:
1976
        node_whitelist = None
1977

    
1978
      insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
1979
                                           _ComputeNics(op, cluster, None,
1980
                                                        self.cfg, ec_id),
1981
                                           _ComputeFullBeParams(op, cluster),
1982
                                           node_whitelist)
1983
               for op in self.op.instances]
1984

    
1985
      req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
1986
      ial = iallocator.IAllocator(self.cfg, self.rpc, req)
1987

    
1988
      ial.Run(self.op.iallocator)
1989

    
1990
      if not ial.success:
1991
        raise errors.OpPrereqError("Can't compute nodes using"
1992
                                   " iallocator '%s': %s" %
1993
                                   (self.op.iallocator, ial.info),
1994
                                   errors.ECODE_NORES)
1995

    
1996
      self.ia_result = ial.result
1997

    
1998
    if self.op.dry_run:
1999
      self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
2000
        constants.JOB_IDS_KEY: [],
2001
        })
2002

    
2003
  def _ConstructPartialResult(self):
    """Constructs the partial result.

    """
    if self.op.iallocator:
      (allocatable, failed_insts) = self.ia_result
      allocatable_insts = map(compat.fst, allocatable)
    else:
      allocatable_insts = [op.instance_name for op in self.op.instances]
      failed_insts = []

    return {
      constants.ALLOCATABLE_KEY: allocatable_insts,
      constants.FAILED_KEY: failed_insts,
      }

  def Exec(self, feedback_fn):
    """Executes the opcode.

    """
    jobs = []
    if self.op.iallocator:
      op2inst = dict((op.instance_name, op) for op in self.op.instances)
      (allocatable, failed) = self.ia_result

      for (name, node_names) in allocatable:
        op = op2inst.pop(name)

        (op.pnode_uuid, op.pnode) = \
          ExpandNodeUuidAndName(self.cfg, None, node_names[0])
        if len(node_names) > 1:
          (op.snode_uuid, op.snode) = \
            ExpandNodeUuidAndName(self.cfg, None, node_names[1])

        jobs.append([op])

      missing = set(op2inst.keys()) - set(failed)
      assert not missing, \
        "Iallocator did return incomplete result: %s" % \
        utils.CommaJoin(missing)
    else:
      jobs.extend([op] for op in self.op.instances)

    return ResultWithJobs(jobs, **self._ConstructPartialResult())


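# Illustrative sketch only, not part of the original module: the approximate
# shape of the dry-run result assembled by LUInstanceMultiAlloc above. The
# instance names are assumptions made up for this example.
def _ExampleMultiAllocDryRunResult():
  """Hedged example of the LUInstanceMultiAlloc dry-run result format."""
  return {
    constants.ALLOCATABLE_KEY: ["inst1.example.com"],
    constants.FAILED_KEY: ["inst2.example.com"],
    constants.JOB_IDS_KEY: [],
    }

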
class _InstNicModPrivate:
  """Data structure for network interface modifications.

  Used by L{LUInstanceSetParams}.

  """
  def __init__(self):
    self.params = None
    self.filled = None


def _PrepareContainerMods(mods, private_fn):
  """Prepares a list of container modifications by adding a private data field.

  @type mods: list of tuples; (operation, index, parameters)
  @param mods: List of modifications
  @type private_fn: callable or None
  @param private_fn: Callable for constructing a private data field for a
    modification
  @rtype: list

  """
  if private_fn is None:
    fn = lambda: None
  else:
    fn = private_fn

  return [(op, idx, params, fn()) for (op, idx, params) in mods]


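# Illustrative sketch only, not part of the original module: how a list of
# (operation, index, parameters) tuples gains a per-modification private data
# object before being handed to _ApplyContainerMods. The literal NIC
# modification below is an assumption made up for this example.
def _ExamplePrepareNicMods():
  """Hedged example of L{_PrepareContainerMods} usage."""
  mods = [
    (constants.DDM_ADD, -1, {constants.INIC_MAC: constants.VALUE_AUTO}),
    ]
  # Each entry becomes (op, idx, params, private); here every private data
  # object is a fresh _InstNicModPrivate instance.
  return _PrepareContainerMods(mods, _InstNicModPrivate)

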
def _CheckNodesPhysicalCPUs(lu, node_uuids, requested, hypervisor_specs):
  """Checks if nodes have enough physical CPUs.

  This function checks if all given nodes have the needed number of
  physical CPUs. In case any node has fewer CPUs or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type node_uuids: C{list}
  @param node_uuids: the list of node UUIDs to check
  @type requested: C{int}
  @param requested: the minimum acceptable number of physical CPUs
  @type hypervisor_specs: list of pairs (string, dict of strings)
  @param hypervisor_specs: list of hypervisor specifications in
      pairs (hypervisor_name, hvparams)
  @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
      or we cannot check the node

  """
  nodeinfo = lu.rpc.call_node_info(node_uuids, None, hypervisor_specs)
  for node_uuid in node_uuids:
    info = nodeinfo[node_uuid]
    node_name = lu.cfg.GetNodeName(node_uuid)
    info.Raise("Cannot get current information from node %s" % node_name,
               prereq=True, ecode=errors.ECODE_ENVIRON)
    (_, _, (hv_info, )) = info.payload
    num_cpus = hv_info.get("cpu_total", None)
    if not isinstance(num_cpus, int):
      raise errors.OpPrereqError("Can't compute the number of physical CPUs"
                                 " on node %s, result was '%s'" %
                                 (node_name, num_cpus), errors.ECODE_ENVIRON)
    if requested > num_cpus:
      raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
                                 "required" % (node_name, num_cpus, requested),
                                 errors.ECODE_NORES)


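# Illustrative sketch only, not part of the original module: the expected shape
# of the hypervisor_specs argument and a typical call. The hypervisor choice
# and the CPU count of 4 are assumptions made up for this example.
def _ExampleCheckPhysicalCPUs(lu, node_uuids):
  """Hedged example of calling L{_CheckNodesPhysicalCPUs}."""
  cluster = lu.cfg.GetClusterInfo()
  hypervisor_specs = [(constants.HT_XEN_PVM,
                       cluster.hvparams.get(constants.HT_XEN_PVM, {}))]
  # Raises OpPrereqError unless every node reports at least 4 physical CPUs.
  _CheckNodesPhysicalCPUs(lu, node_uuids, 4, hypervisor_specs)

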
def GetItemFromContainer(identifier, kind, container):
  """Return the item referred to by the identifier.

  @type identifier: string
  @param identifier: Item index or name or UUID
  @type kind: string
  @param kind: One-word item description
  @type container: list
  @param container: Container to get the item from

  """
  # Index
  try:
    idx = int(identifier)
    if idx == -1:
      # Append
      absidx = len(container) - 1
    elif idx < 0:
      raise IndexError("Not accepting negative indices other than -1")
    elif idx > len(container):
      raise IndexError("Got %s index %s, but there are only %s" %
                       (kind, idx, len(container)))
    else:
      absidx = idx
    return (absidx, container[idx])
  except ValueError:
    pass

  for idx, item in enumerate(container):
    if item.uuid == identifier or item.name == identifier:
      return (idx, item)

  raise errors.OpPrereqError("Cannot find %s with identifier %s" %
                             (kind, identifier), errors.ECODE_NOENT)


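# Illustrative sketch only, not part of the original module: the three ways an
# item can be addressed through GetItemFromContainer. The _FakeDisk class and
# its values are assumptions made up for this example.
def _ExampleGetItemFromContainer():
  """Hedged example of L{GetItemFromContainer} lookups."""
  class _FakeDisk(object):
    def __init__(self, uuid, name):
      self.uuid = uuid
      self.name = name

  disks = [_FakeDisk("aaaa-0000", "root"), _FakeDisk("bbbb-1111", "swap")]

  # Lookup by positive index, by name (or UUID), and by -1 (the last item).
  assert GetItemFromContainer("0", "disk", disks) == (0, disks[0])
  assert GetItemFromContainer("swap", "disk", disks) == (1, disks[1])
  assert GetItemFromContainer("-1", "disk", disks) == (1, disks[1])

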
def _ApplyContainerMods(kind, container, chgdesc, mods,
                        create_fn, modify_fn, remove_fn,
                        post_add_fn=None):
  """Applies descriptions in C{mods} to C{container}.

  @type kind: string
  @param kind: One-word item description
  @type container: list
  @param container: Container to modify
  @type chgdesc: None or list
  @param chgdesc: List of applied changes
  @type mods: list
  @param mods: Modifications as returned by L{_PrepareContainerMods}
  @type create_fn: callable
  @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
    receives absolute item index, parameters and private data object as added
    by L{_PrepareContainerMods}, returns tuple containing new item and changes
    as list
  @type modify_fn: callable
  @param modify_fn: Callback for modifying an existing item
    (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
    and private data object as added by L{_PrepareContainerMods}, returns
    changes as list
  @type remove_fn: callable
  @param remove_fn: Callback on removing item; receives absolute item index,
    item and private data object as added by L{_PrepareContainerMods}
  @type post_add_fn: callable
  @param post_add_fn: Callable for post-processing a newly created item after
    it has been put into the container. It receives the index of the new item
    and the new item as parameters.

  """
  for (op, identifier, params, private) in mods:
    changes = None

    if op == constants.DDM_ADD:
      # Calculate where item will be added
      # When adding an item, identifier can only be an index
      try:
        idx = int(identifier)
      except ValueError:
        raise errors.OpPrereqError("Only a positive integer or -1 is accepted"
                                   " as identifier for %s" % constants.DDM_ADD,
                                   errors.ECODE_INVAL)
      if idx == -1:
        addidx = len(container)
      else:
        if idx < 0:
          raise IndexError("Not accepting negative indices other than -1")
        elif idx > len(container):
          raise IndexError("Got %s index %s, but there are only %s" %
                           (kind, idx, len(container)))
        addidx = idx

      if create_fn is None:
        item = params
      else:
        (item, changes) = create_fn(addidx, params, private)

      if idx == -1:
        container.append(item)
      else:
        assert idx >= 0
        assert idx <= len(container)
        # list.insert does so before the specified index
        container.insert(idx, item)

      if post_add_fn is not None:
        post_add_fn(addidx, item)

    else:
      # Retrieve existing item
      (absidx, item) = GetItemFromContainer(identifier, kind, container)

      if op == constants.DDM_REMOVE:
        assert not params

        changes = [("%s/%s" % (kind, absidx), "remove")]

        if remove_fn is not None:
          msg = remove_fn(absidx, item, private)
          if msg:
            changes.append(("%s/%s" % (kind, absidx), msg))

        assert container[absidx] == item
        del container[absidx]
      elif op == constants.DDM_MODIFY:
        if modify_fn is not None:
          changes = modify_fn(absidx, item, params, private)
      else:
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)

    assert _TApplyContModsCbChanges(changes)

    if not (chgdesc is None or changes is None):
      chgdesc.extend(changes)


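# Illustrative sketch only, not part of the original module: a minimal wiring
# of _PrepareContainerMods and _ApplyContainerMods on a plain list of dicts.
# All callback names and values below are assumptions made up for this
# example; real callers such as LUInstanceSetParams build Disk/NIC objects.
def _ExampleApplyContainerMods():
  """Hedged example of the container modification protocol."""
  def create_fn(addidx, params, _private):
    # Return the new item plus a list of (name, value) change descriptions.
    return (dict(params), [("item/%s" % addidx, "add")])

  def modify_fn(absidx, item, params, _private):
    item.update(params)
    return [("item/%s" % absidx, "modify")]

  def remove_fn(_absidx, _item, _private):
    return None

  container = [{"name": "first"}]
  chgdesc = []
  mods = _PrepareContainerMods([
    (constants.DDM_ADD, -1, {"name": "second"}),
    (constants.DDM_MODIFY, "0", {"name": "renamed"}),
    ], None)
  _ApplyContainerMods("item", container, chgdesc, mods,
                      create_fn, modify_fn, remove_fn)
  # container is now [{"name": "renamed"}, {"name": "second"}] and chgdesc
  # records each applied change.
  return (container, chgdesc)

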
def _UpdateIvNames(base_index, disks):
  """Updates the C{iv_name} attribute of disks.

  @type disks: list of L{objects.Disk}

  """
  for (idx, disk) in enumerate(disks):
    disk.iv_name = "disk/%s" % (base_index + idx, )


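# Illustrative sketch only, not part of the original module: when two disks are
# appended to an instance that already has one, the new disks would be labelled
# "disk/1" and "disk/2". The argument name is an assumption for this example.
def _ExampleUpdateIvNames(new_disks):
  """Hedged example of L{_UpdateIvNames} on freshly appended disks."""
  _UpdateIvNames(1, new_disks)
  return [d.iv_name for d in new_disks]

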
class LUInstanceSetParams(LogicalUnit):
2263
  """Modifies an instances's parameters.
2264

2265
  """
2266
  HPATH = "instance-modify"
2267
  HTYPE = constants.HTYPE_INSTANCE
2268
  REQ_BGL = False
2269

    
2270
  @staticmethod
2271
  def _UpgradeDiskNicMods(kind, mods, verify_fn):
2272
    assert ht.TList(mods)
2273
    assert not mods or len(mods[0]) in (2, 3)
2274

    
2275
    if mods and len(mods[0]) == 2:
2276
      result = []
2277

    
2278
      addremove = 0
2279
      for op, params in mods:
2280
        if op in (constants.DDM_ADD, constants.DDM_REMOVE):
2281
          result.append((op, -1, params))
2282
          addremove += 1
2283

    
2284
          if addremove > 1:
2285
            raise errors.OpPrereqError("Only one %s add or remove operation is"
2286
                                       " supported at a time" % kind,
2287
                                       errors.ECODE_INVAL)
2288
        else:
2289
          result.append((constants.DDM_MODIFY, op, params))
2290

    
2291
      assert verify_fn(result)
2292
    else:
2293
      result = mods
2294

    
2295
    return result
2296

    
2297
  @staticmethod
2298
  def _CheckMods(kind, mods, key_types, item_fn):
2299
    """Ensures requested disk/NIC modifications are valid.
2300

2301
    """
2302
    for (op, _, params) in mods:
2303
      assert ht.TDict(params)
2304

    
2305
      # If 'key_types' is an empty dict, we assume we have an
2306
      # 'ext' template and thus do not ForceDictType
2307
      if key_types:
2308
        utils.ForceDictType(params, key_types)
2309

    
2310
      if op == constants.DDM_REMOVE:
2311
        if params:
2312
          raise errors.OpPrereqError("No settings should be passed when"
2313
                                     " removing a %s" % kind,
2314
                                     errors.ECODE_INVAL)
2315
      elif op in (constants.DDM_ADD, constants.DDM_MODIFY):
2316
        item_fn(op, params)
2317
      else:
2318
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2319

    
2320
  @staticmethod
2321
  def _VerifyDiskModification(op, params, excl_stor):
2322
    """Verifies a disk modification.
2323

2324
    """
2325
    if op == constants.DDM_ADD:
2326
      mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
2327
      if mode not in constants.DISK_ACCESS_SET:
2328
        raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
2329
                                   errors.ECODE_INVAL)
2330

    
2331
      size = params.get(constants.IDISK_SIZE, None)
2332
      if size is None:
2333
        raise errors.OpPrereqError("Required disk parameter '%s' missing" %
2334
                                   constants.IDISK_SIZE, errors.ECODE_INVAL)
2335
      size = int(size)
2336

    
2337
      params[constants.IDISK_SIZE] = size
2338
      name = params.get(constants.IDISK_NAME, None)
2339
      if name is not None and name.lower() == constants.VALUE_NONE:
2340
        params[constants.IDISK_NAME] = None
2341

    
2342
      CheckSpindlesExclusiveStorage(params, excl_stor, True)
2343

    
2344
    elif op == constants.DDM_MODIFY:
2345
      if constants.IDISK_SIZE in params:
2346
        raise errors.OpPrereqError("Disk size change not possible, use"
2347
                                   " grow-disk", errors.ECODE_INVAL)
2348
      if len(params) > 2:
2349
        raise errors.OpPrereqError("Disk modification doesn't support"
2350
                                   " additional arbitrary parameters",
2351
                                   errors.ECODE_INVAL)
2352
      name = params.get(constants.IDISK_NAME, None)
2353
      if name is not None and name.lower() == constants.VALUE_NONE:
2354
        params[constants.IDISK_NAME] = None
2355

    
2356
  @staticmethod
2357
  def _VerifyNicModification(op, params):
2358
    """Verifies a network interface modification.
2359

2360
    """
2361
    if op in (constants.DDM_ADD, constants.DDM_MODIFY):
2362
      ip = params.get(constants.INIC_IP, None)
2363
      name = params.get(constants.INIC_NAME, None)
2364
      req_net = params.get(constants.INIC_NETWORK, None)
2365
      link = params.get(constants.NIC_LINK, None)
2366
      mode = params.get(constants.NIC_MODE, None)
2367
      if name is not None and name.lower() == constants.VALUE_NONE:
2368
        params[constants.INIC_NAME] = None
2369
      if req_net is not None:
2370
        if req_net.lower() == constants.VALUE_NONE:
2371
          params[constants.INIC_NETWORK] = None
2372
          req_net = None
2373
        elif link is not None or mode is not None:
2374
          raise errors.OpPrereqError("If network is given"
2375
                                     " mode or link should not",
2376
                                     errors.ECODE_INVAL)
2377

    
2378
      if op == constants.DDM_ADD:
2379
        macaddr = params.get(constants.INIC_MAC, None)
2380
        if macaddr is None:
2381
          params[constants.INIC_MAC] = constants.VALUE_AUTO
2382

    
2383
      if ip is not None:
2384
        if ip.lower() == constants.VALUE_NONE:
2385
          params[constants.INIC_IP] = None
2386
        else:
2387
          if ip.lower() == constants.NIC_IP_POOL:
2388
            if op == constants.DDM_ADD and req_net is None:
2389
              raise errors.OpPrereqError("If ip=pool, parameter network"
2390
                                         " cannot be none",
2391
                                         errors.ECODE_INVAL)
2392
          else:
2393
            if not netutils.IPAddress.IsValid(ip):
2394
              raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
2395
                                         errors.ECODE_INVAL)
2396

    
2397
      if constants.INIC_MAC in params:
2398
        macaddr = params[constants.INIC_MAC]
2399
        if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2400
          macaddr = utils.NormalizeAndValidateMac(macaddr)
2401

    
2402
        if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
2403
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
2404
                                     " modifying an existing NIC",
2405
                                     errors.ECODE_INVAL)
2406

    
2407
  def CheckArguments(self):
2408
    if not (self.op.nics or self.op.disks or self.op.disk_template or
2409
            self.op.hvparams or self.op.beparams or self.op.os_name or
2410
            self.op.osparams or self.op.offline is not None or
2411
            self.op.runtime_mem or self.op.pnode):
2412
      raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
2413

    
2414
    if self.op.hvparams:
2415
      CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
2416
                           "hypervisor", "instance", "cluster")
2417

    
2418
    self.op.disks = self._UpgradeDiskNicMods(
2419
      "disk", self.op.disks, ht.TSetParamsMods(ht.TIDiskParams))
2420
    self.op.nics = self._UpgradeDiskNicMods(
2421
      "NIC", self.op.nics, ht.TSetParamsMods(ht.TINicParams))
2422

    
2423
    if self.op.disks and self.op.disk_template is not None:
2424
      raise errors.OpPrereqError("Disk template conversion and other disk"
2425
                                 " changes not supported at the same time",
2426
                                 errors.ECODE_INVAL)
2427

    
2428
    if (self.op.disk_template and
2429
        self.op.disk_template in constants.DTS_INT_MIRROR and
2430
        self.op.remote_node is None):
2431
      raise errors.OpPrereqError("Changing the disk template to a mirrored"
2432
                                 " one requires specifying a secondary node",
2433
                                 errors.ECODE_INVAL)
2434

    
2435
    # Check NIC modifications
2436
    self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
2437
                    self._VerifyNicModification)
2438

    
2439
    if self.op.pnode:
2440
      (self.op.pnode_uuid, self.op.pnode) = \
2441
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
2442

    
2443
  def ExpandNames(self):
2444
    self._ExpandAndLockInstance()
2445
    self.needed_locks[locking.LEVEL_NODEGROUP] = []
2446
    # Can't even acquire node locks in shared mode as upcoming changes in
2447
    # Ganeti 2.6 will start to modify the node object on disk conversion
2448
    self.needed_locks[locking.LEVEL_NODE] = []
2449
    self.needed_locks[locking.LEVEL_NODE_RES] = []
2450
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2451
    # Look node group to look up the ipolicy
2452
    self.share_locks[locking.LEVEL_NODEGROUP] = 1
2453

    
2454
  def DeclareLocks(self, level):
2455
    if level == locking.LEVEL_NODEGROUP:
2456
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
2457
      # Acquire locks for the instance's nodegroups optimistically. Needs
2458
      # to be verified in CheckPrereq
2459
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
2460
        self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
2461
    elif level == locking.LEVEL_NODE:
2462
      self._LockInstancesNodes()
2463
      if self.op.disk_template and self.op.remote_node:
2464
        (self.op.remote_node_uuid, self.op.remote_node) = \
2465
          ExpandNodeUuidAndName(self.cfg, self.op.remote_node_uuid,
2466
                                self.op.remote_node)
2467
        self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node_uuid)
2468
    elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
2469
      # Copy node locks
2470
      self.needed_locks[locking.LEVEL_NODE_RES] = \
2471
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
2472

    
2473
  def BuildHooksEnv(self):
2474
    """Build hooks env.
2475

2476
    This runs on the master, primary and secondaries.
2477

2478
    """
2479
    args = {}
2480
    if constants.BE_MINMEM in self.be_new:
2481
      args["minmem"] = self.be_new[constants.BE_MINMEM]
2482
    if constants.BE_MAXMEM in self.be_new:
2483
      args["maxmem"] = self.be_new[constants.BE_MAXMEM]
2484
    if constants.BE_VCPUS in self.be_new:
2485
      args["vcpus"] = self.be_new[constants.BE_VCPUS]
2486
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
2487
    # information at all.
2488

    
2489
    if self._new_nics is not None:
2490
      nics = []
2491

    
2492
      for nic in self._new_nics:
2493
        n = copy.deepcopy(nic)
2494
        nicparams = self.cluster.SimpleFillNIC(n.nicparams)
2495
        n.nicparams = nicparams
2496
        nics.append(NICToTuple(self, n))
2497

    
2498
      args["nics"] = nics
2499

    
2500
    env = BuildInstanceHookEnvByObject(self, self.instance, override=args)
2501
    if self.op.disk_template:
2502
      env["NEW_DISK_TEMPLATE"] = self.op.disk_template
2503
    if self.op.runtime_mem:
2504
      env["RUNTIME_MEMORY"] = self.op.runtime_mem
2505

    
2506
    return env
2507

    
2508
  def BuildHooksNodes(self):
2509
    """Build hooks nodes.
2510

2511
    """
2512
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
2513
    return (nl, nl)
2514

    
2515
  def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
                              old_params, cluster, pnode_uuid):

    update_params_dict = dict([(key, params[key])
                               for key in constants.NICS_PARAMETERS
                               if key in params])

    req_link = update_params_dict.get(constants.NIC_LINK, None)
    req_mode = update_params_dict.get(constants.NIC_MODE, None)

    new_net_uuid = None
    new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid)
    if new_net_uuid_or_name:
      new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name)
      new_net_obj = self.cfg.GetNetwork(new_net_uuid)

    if old_net_uuid:
      old_net_obj = self.cfg.GetNetwork(old_net_uuid)

    if new_net_uuid:
      netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode_uuid)
      if not netparams:
        raise errors.OpPrereqError("No netparams found for the network"
                                   " %s, probably not connected" %
                                   new_net_obj.name, errors.ECODE_INVAL)
      new_params = dict(netparams)
    else:
      new_params = GetUpdatedParams(old_params, update_params_dict)

    utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)

    new_filled_params = cluster.SimpleFillNIC(new_params)
    objects.NIC.CheckParameterSyntax(new_filled_params)

    new_mode = new_filled_params[constants.NIC_MODE]
    if new_mode == constants.NIC_MODE_BRIDGED:
      bridge = new_filled_params[constants.NIC_LINK]
      msg = self.rpc.call_bridges_exist(pnode_uuid, [bridge]).fail_msg
      if msg:
        msg = "Error checking bridges on node '%s': %s" % \
                (self.cfg.GetNodeName(pnode_uuid), msg)
        if self.op.force:
          self.warn.append(msg)
        else:
          raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)

    elif new_mode == constants.NIC_MODE_ROUTED:
      ip = params.get(constants.INIC_IP, old_ip)
      if ip is None:
        raise errors.OpPrereqError("Cannot set the NIC IP address to None"
                                   " on a routed NIC", errors.ECODE_INVAL)

    elif new_mode == constants.NIC_MODE_OVS:
      # TODO: check OVS link
      self.LogInfo("OVS links are currently not checked for correctness")

    if constants.INIC_MAC in params:
      mac = params[constants.INIC_MAC]
      if mac is None:
        raise errors.OpPrereqError("Cannot unset the NIC MAC address",
                                   errors.ECODE_INVAL)
      elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        # otherwise generate the MAC address
        params[constants.INIC_MAC] = \
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
      else:
        # or validate/reserve the current one
        try:
          self.cfg.ReserveMAC(mac, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("MAC address '%s' already in use"
                                     " in cluster" % mac,
                                     errors.ECODE_NOTUNIQUE)
    elif new_net_uuid != old_net_uuid:

      def get_net_prefix(net_uuid):
        mac_prefix = None
        if net_uuid:
          nobj = self.cfg.GetNetwork(net_uuid)
          mac_prefix = nobj.mac_prefix

        return mac_prefix

      new_prefix = get_net_prefix(new_net_uuid)
      old_prefix = get_net_prefix(old_net_uuid)
      if old_prefix != new_prefix:
        params[constants.INIC_MAC] = \
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())

    # if there is a change in (ip, network) tuple
    new_ip = params.get(constants.INIC_IP, old_ip)
    if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
      if new_ip:
        # if IP is pool then require a network and generate one IP
        if new_ip.lower() == constants.NIC_IP_POOL:
          if new_net_uuid:
            try:
              new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("Unable to get a free IP"
                                         " from the address pool",
                                         errors.ECODE_STATE)
            self.LogInfo("Chose IP %s from network %s",
                         new_ip,
                         new_net_obj.name)
            params[constants.INIC_IP] = new_ip
          else:
            raise errors.OpPrereqError("ip=pool, but no network found",
                                       errors.ECODE_INVAL)
        # Otherwise reserve the new IP in the new network, if any
        elif new_net_uuid:
          try:
            self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId())
            self.LogInfo("Reserving IP %s in network %s",
                         new_ip, new_net_obj.name)
          except errors.ReservationError:
            raise errors.OpPrereqError("IP %s not available in network %s" %
                                       (new_ip, new_net_obj.name),
                                       errors.ECODE_NOTUNIQUE)
        # new network is None so check if new IP is a conflicting IP
        elif self.op.conflicts_check:
          _CheckForConflictingIp(self, new_ip, pnode_uuid)

      # release old IP if old network is not None
      if old_ip and old_net_uuid:
        try:
          self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId())
        except errors.AddressPoolError:
          logging.warning("Release IP %s not contained in network %s",
                          old_ip, old_net_obj.name)

    # there are no changes in (ip, network) tuple and old network is not None
    elif (old_net_uuid is not None and
          (req_link is not None or req_mode is not None)):
      raise errors.OpPrereqError("Not allowed to change link or mode of"
                                 " a NIC that is connected to a network",
                                 errors.ECODE_INVAL)

    private.params = new_params
    private.filled = new_filled_params

  def _PreCheckDiskTemplate(self, pnode_info):
    """CheckPrereq checks related to a new disk template."""
    # Arguments are passed to avoid configuration lookups
    pnode_uuid = self.instance.primary_node
    if self.instance.disk_template == self.op.disk_template:
      raise errors.OpPrereqError("Instance already has disk template %s" %
                                 self.instance.disk_template,
                                 errors.ECODE_INVAL)

    if not self.cluster.IsDiskTemplateEnabled(self.op.disk_template):
      raise errors.OpPrereqError("Disk template '%s' is not enabled for this"
                                 " cluster." % self.op.disk_template)

    if (self.instance.disk_template,
        self.op.disk_template) not in self._DISK_CONVERSIONS:
      raise errors.OpPrereqError("Unsupported disk template conversion from"
                                 " %s to %s" % (self.instance.disk_template,
                                                self.op.disk_template),
                                 errors.ECODE_INVAL)
    CheckInstanceState(self, self.instance, INSTANCE_DOWN,
                       msg="cannot change disk template")
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.remote_node_uuid == pnode_uuid:
        raise errors.OpPrereqError("Given new secondary node %s is the same"
                                   " as the primary node of the instance" %
                                   self.op.remote_node, errors.ECODE_STATE)
      CheckNodeOnline(self, self.op.remote_node_uuid)
      CheckNodeNotDrained(self, self.op.remote_node_uuid)
      # FIXME: here we assume that the old instance type is DT_PLAIN
      assert self.instance.disk_template == constants.DT_PLAIN
      disks = [{constants.IDISK_SIZE: d.size,
                constants.IDISK_VG: d.logical_id[0]}
               for d in self.instance.disks]
      required = ComputeDiskSizePerVG(self.op.disk_template, disks)
      CheckNodesFreeDiskPerVG(self, [self.op.remote_node_uuid], required)

      snode_info = self.cfg.GetNodeInfo(self.op.remote_node_uuid)
      snode_group = self.cfg.GetNodeGroup(snode_info.group)
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
                                                              snode_group)
      CheckTargetNodeIPolicy(self, ipolicy, self.instance, snode_info, self.cfg,
                             ignore=self.op.ignore_ipolicy)
      if pnode_info.group != snode_info.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    if self.op.disk_template not in constants.DTS_EXCL_STORAGE:
      # Make sure none of the nodes require exclusive storage
      nodes = [pnode_info]
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        assert snode_info
        nodes.append(snode_info)
      has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
      if compat.any(map(has_es, nodes)):
        errmsg = ("Cannot convert disk template from %s to %s when exclusive"
                  " storage is enabled" % (self.instance.disk_template,
                                           self.op.disk_template))
        raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)

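  # Note added for readability: after _PrepareContainerMods() each entry of
  # self.diskmod is an (op, index, params, private) tuple, which is why the
  # checks below inspect mod[0] (the DDM_* operation) and mod[2] (the
  # parameter dict).  A purely illustrative entry for adding a disk could be
  # (constants.DDM_ADD, idx, {constants.IDISK_SIZE: 1024}, None); the index
  # and size values here are hypothetical, not taken from this module.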
  def _PreCheckDisks(self, ispec):
    """CheckPrereq checks related to disk changes.

    @type ispec: dict
    @param ispec: instance specs to be updated with the new disks

    """
    self.diskparams = self.cfg.GetInstanceDiskParams(self.instance)

    excl_stor = compat.any(
      rpc.GetExclusiveStorageForNodes(self.cfg,
                                      self.instance.all_nodes).values()
      )

    # Check disk modifications. This is done here and not in CheckArguments
    # (as with NICs), because we need to know the instance's disk template
    ver_fn = lambda op, par: self._VerifyDiskModification(op, par, excl_stor)
    if self.instance.disk_template == constants.DT_EXT:
      self._CheckMods("disk", self.op.disks, {}, ver_fn)
    else:
      self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
                      ver_fn)

    self.diskmod = _PrepareContainerMods(self.op.disks, None)

    # Check the validity of the `provider' parameter
    if self.instance.disk_template == constants.DT_EXT:
      for mod in self.diskmod:
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
        if mod[0] == constants.DDM_ADD:
          if ext_provider is None:
            raise errors.OpPrereqError("Instance template is '%s' and parameter"
                                       " '%s' missing, during disk add" %
                                       (constants.DT_EXT,
                                        constants.IDISK_PROVIDER),
                                       errors.ECODE_NOENT)
        elif mod[0] == constants.DDM_MODIFY:
          if ext_provider:
            raise errors.OpPrereqError("Parameter '%s' is invalid during disk"
                                       " modification" %
                                       constants.IDISK_PROVIDER,
                                       errors.ECODE_INVAL)
    else:
      for mod in self.diskmod:
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
        if ext_provider is not None:
          raise errors.OpPrereqError("Parameter '%s' is only valid for"
                                     " instances of type '%s'" %
                                     (constants.IDISK_PROVIDER,
                                      constants.DT_EXT),
                                     errors.ECODE_INVAL)

    if not self.op.wait_for_sync and self.instance.disks_active:
      for mod in self.diskmod:
        if mod[0] == constants.DDM_ADD:
          raise errors.OpPrereqError("Can't add a disk to an instance with"
                                     " activated disks and"
                                     " --no-wait-for-sync given.",
                                     errors.ECODE_INVAL)

    if self.op.disks and self.instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Disk operations not supported for"
                                 " diskless instances", errors.ECODE_INVAL)

    def _PrepareDiskMod(_, disk, params, __):
      disk.name = params.get(constants.IDISK_NAME, None)

    # Verify disk changes (operating on a copy)
    disks = copy.deepcopy(self.instance.disks)
    _ApplyContainerMods("disk", disks, None, self.diskmod, None,
                        _PrepareDiskMod, None)
    utils.ValidateDeviceNames("disk", disks)
    if len(disks) > constants.MAX_DISKS:
      raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
                                 " more" % constants.MAX_DISKS,
                                 errors.ECODE_STATE)
    disk_sizes = [disk.size for disk in self.instance.disks]
    disk_sizes.extend(params["size"] for (op, idx, params, private) in
                      self.diskmod if op == constants.DDM_ADD)
    ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
    ispec[constants.ISPEC_DISK_SIZE] = disk_sizes

    if self.op.offline is not None and self.op.offline:
      CheckInstanceState(self, self.instance, CAN_CHANGE_INSTANCE_OFFLINE,
                         msg="can't change to offline")

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the instance list against the existing names.

    """
    assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    self.cluster = self.cfg.GetClusterInfo()
    cluster_hvparams = self.cluster.hvparams[self.instance.hypervisor]

    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    pnode_uuid = self.instance.primary_node

    self.warn = []

    if (self.op.pnode_uuid is not None and self.op.pnode_uuid != pnode_uuid and
        not self.op.force):
      # verify that the instance is not up
      instance_info = self.rpc.call_instance_info(
          pnode_uuid, self.instance.name, self.instance.hypervisor,
          cluster_hvparams)
      if instance_info.fail_msg:
        self.warn.append("Can't get instance runtime information: %s" %
                         instance_info.fail_msg)
      elif instance_info.payload:
        raise errors.OpPrereqError("Instance is still running on %s" %
                                   self.cfg.GetNodeName(pnode_uuid),
                                   errors.ECODE_STATE)

    assert pnode_uuid in self.owned_locks(locking.LEVEL_NODE)
    node_uuids = list(self.instance.all_nodes)
    pnode_info = self.cfg.GetNodeInfo(pnode_uuid)

    #_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
    assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
    group_info = self.cfg.GetNodeGroup(pnode_info.group)

    # dictionary with instance information after the modification
    ispec = {}

    if self.op.hotplug:
      result = self.rpc.call_hotplug_supported(self.instance.primary_node,
                                               self.instance)
      result.Raise("Hotplug is not supported.")

    # Prepare NIC modifications
    self.nicmod = _PrepareContainerMods(self.op.nics, _InstNicModPrivate)

    # OS change
    if self.op.os_name and not self.op.force:
      CheckNodeHasOS(self, self.instance.primary_node, self.op.os_name,
                     self.op.force_variant)
      instance_os = self.op.os_name
    else:
      instance_os = self.instance.os

    assert not (self.op.disk_template and self.op.disks), \
      "Can't modify disk template and apply disk changes at the same time"

    if self.op.disk_template:
      self._PreCheckDiskTemplate(pnode_info)

    self._PreCheckDisks(ispec)

    # hvparams processing
    if self.op.hvparams:
      hv_type = self.instance.hypervisor
      i_hvdict = GetUpdatedParams(self.instance.hvparams, self.op.hvparams)
      utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
      hv_new = self.cluster.SimpleFillHV(hv_type, self.instance.os, i_hvdict)

      # local check
      hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
      CheckHVParams(self, node_uuids, self.instance.hypervisor, hv_new)
      self.hv_proposed = self.hv_new = hv_new # the new actual values
      self.hv_inst = i_hvdict # the new dict (without defaults)
    else:
      self.hv_proposed = self.cluster.SimpleFillHV(self.instance.hypervisor,
                                                   self.instance.os,
                                                   self.instance.hvparams)
      self.hv_new = self.hv_inst = {}

    # beparams processing
    if self.op.beparams:
      i_bedict = GetUpdatedParams(self.instance.beparams, self.op.beparams,
                                  use_none=True)
      objects.UpgradeBeParams(i_bedict)
      utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
      be_new = self.cluster.SimpleFillBE(i_bedict)
      self.be_proposed = self.be_new = be_new # the new actual values
      self.be_inst = i_bedict # the new dict (without defaults)
    else:
      self.be_new = self.be_inst = {}
      self.be_proposed = self.cluster.SimpleFillBE(self.instance.beparams)
    be_old = self.cluster.FillBE(self.instance)

    # CPU param validation -- checking every time a parameter is
    # changed to cover all cases where either CPU mask or vcpus have
    # changed
    if (constants.BE_VCPUS in self.be_proposed and
        constants.HV_CPU_MASK in self.hv_proposed):
      cpu_list = \
        utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
      # Verify mask is consistent with number of vCPUs. Can skip this
      # test if only 1 entry in the CPU mask, which means same mask
      # is applied to all vCPUs.
      if (len(cpu_list) > 1 and
          len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
        raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
                                   " CPU mask [%s]" %
                                   (self.be_proposed[constants.BE_VCPUS],
                                    self.hv_proposed[constants.HV_CPU_MASK]),
                                   errors.ECODE_INVAL)

      # Only perform this test if a new CPU mask is given
      if constants.HV_CPU_MASK in self.hv_new:
        # Calculate the largest CPU number requested
        max_requested_cpu = max(map(max, cpu_list))
        # Check that all of the instance's nodes have enough physical CPUs to
        # satisfy the requested CPU mask
        hvspecs = [(self.instance.hypervisor,
                    self.cfg.GetClusterInfo()
                      .hvparams[self.instance.hypervisor])]
        _CheckNodesPhysicalCPUs(self, self.instance.all_nodes,
                                max_requested_cpu + 1,
                                hvspecs)

    # osparams processing
    if self.op.osparams:
      i_osdict = GetUpdatedParams(self.instance.osparams, self.op.osparams)
      CheckOSParams(self, True, node_uuids, instance_os, i_osdict)
      self.os_inst = i_osdict # the new dict (without defaults)
    else:
      self.os_inst = {}

    #TODO(dynmem): do the appropriate check involving MINMEM
    if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
        be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
      mem_check_list = [pnode_uuid]
      if be_new[constants.BE_AUTO_BALANCE]:
        # either we changed auto_balance to yes or it was from before
        mem_check_list.extend(self.instance.secondary_nodes)
      instance_info = self.rpc.call_instance_info(
          pnode_uuid, self.instance.name, self.instance.hypervisor,
          cluster_hvparams)
      hvspecs = [(self.instance.hypervisor,
                  cluster_hvparams)]
      nodeinfo = self.rpc.call_node_info(mem_check_list, None,
                                         hvspecs)
      pninfo = nodeinfo[pnode_uuid]
      msg = pninfo.fail_msg
      if msg:
        # Assume the primary node is unreachable and go ahead
        self.warn.append("Can't get info from primary node %s: %s" %
                         (self.cfg.GetNodeName(pnode_uuid), msg))
      else:
        (_, _, (pnhvinfo, )) = pninfo.payload
        if not isinstance(pnhvinfo.get("memory_free", None), int):
          self.warn.append("Node data from primary node %s doesn't contain"
                           " free memory information" %
                           self.cfg.GetNodeName(pnode_uuid))
        elif instance_info.fail_msg:
          self.warn.append("Can't get instance runtime information: %s" %
                           instance_info.fail_msg)
        else:
          if instance_info.payload:
            current_mem = int(instance_info.payload["memory"])
          else:
            # Assume instance not running
            # (there is a slight race condition here, but it's not very
            # probable, and we have no other way to check)
            # TODO: Describe race condition
            current_mem = 0
          #TODO(dynmem): do the appropriate check involving MINMEM
          miss_mem = (be_new[constants.BE_MAXMEM] - current_mem -
                      pnhvinfo["memory_free"])
          if miss_mem > 0:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from starting, due to %d MB of memory"
                                       " missing on its primary node" %
                                       miss_mem, errors.ECODE_NORES)

      if be_new[constants.BE_AUTO_BALANCE]:
        for node_uuid, nres in nodeinfo.items():
          if node_uuid not in self.instance.secondary_nodes:
            continue
          nres.Raise("Can't get info from secondary node %s" %
                     self.cfg.GetNodeName(node_uuid), prereq=True,
                     ecode=errors.ECODE_STATE)
          (_, _, (nhvinfo, )) = nres.payload
          if not isinstance(nhvinfo.get("memory_free", None), int):
            raise errors.OpPrereqError("Secondary node %s didn't return free"
                                       " memory information" %
                                       self.cfg.GetNodeName(node_uuid),
                                       errors.ECODE_STATE)
          #TODO(dynmem): do the appropriate check involving MINMEM
          elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from failover to its secondary node"
                                       " %s, due to not enough memory" %
                                       self.cfg.GetNodeName(node_uuid),
                                       errors.ECODE_STATE)

    if self.op.runtime_mem:
      remote_info = self.rpc.call_instance_info(
         self.instance.primary_node, self.instance.name,
         self.instance.hypervisor,
         cluster_hvparams)
      remote_info.Raise("Error checking node %s" %
                        self.cfg.GetNodeName(self.instance.primary_node))
      if not remote_info.payload: # not running already
        raise errors.OpPrereqError("Instance %s is not running" %
                                   self.instance.name, errors.ECODE_STATE)

      current_memory = remote_info.payload["memory"]
      if (not self.op.force and
           (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
            self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
        raise errors.OpPrereqError("Instance %s must have memory between %d"
                                   " and %d MB of memory unless --force is"
                                   " given" %
                                   (self.instance.name,
                                    self.be_proposed[constants.BE_MINMEM],
                                    self.be_proposed[constants.BE_MAXMEM]),
                                   errors.ECODE_INVAL)

      delta = self.op.runtime_mem - current_memory
      if delta > 0:
        CheckNodeFreeMemory(
            self, self.instance.primary_node,
            "ballooning memory for instance %s" % self.instance.name, delta,
            self.instance.hypervisor,
            self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])

    # make self.cluster visible in the functions below
    cluster = self.cluster

    def _PrepareNicCreate(_, params, private):
      self._PrepareNicModification(params, private, None, None,
                                   {}, cluster, pnode_uuid)
      return (None, None)

    def _PrepareNicMod(_, nic, params, private):
      self._PrepareNicModification(params, private, nic.ip, nic.network,
                                   nic.nicparams, cluster, pnode_uuid)
      return None

    def _PrepareNicRemove(_, params, __):
      ip = params.ip
      net = params.network
      if net is not None and ip is not None:
        self.cfg.ReleaseIp(net, ip, self.proc.GetECId())

    # Verify NIC changes (operating on copy)
    nics = self.instance.nics[:]
    _ApplyContainerMods("NIC", nics, None, self.nicmod,
                        _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
    if len(nics) > constants.MAX_NICS:
      raise errors.OpPrereqError("Instance has too many network interfaces"
                                 " (%d), cannot add more" % constants.MAX_NICS,
                                 errors.ECODE_STATE)

    # Pre-compute NIC changes (necessary to use result in hooks)
    self._nic_chgdesc = []
    if self.nicmod:
      # Operate on copies as this is still in prereq
      nics = [nic.Copy() for nic in self.instance.nics]
      _ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
                          self._CreateNewNic, self._ApplyNicMods,
                          self._RemoveNic)
      # Verify that NIC names are unique and valid
      utils.ValidateDeviceNames("NIC", nics)
      self._new_nics = nics
      ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
    else:
      self._new_nics = None
      ispec[constants.ISPEC_NIC_COUNT] = len(self.instance.nics)

    if not self.op.ignore_ipolicy:
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
                                                              group_info)

      # Fill ispec with backend parameters
      ispec[constants.ISPEC_SPINDLE_USE] = \
        self.be_new.get(constants.BE_SPINDLE_USE, None)
      ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS,
                                                         None)

      # Copy ispec to verify parameters with min/max values separately
      if self.op.disk_template:
        new_disk_template = self.op.disk_template
      else:
        new_disk_template = self.instance.disk_template
      ispec_max = ispec.copy()
      ispec_max[constants.ISPEC_MEM_SIZE] = \
        self.be_new.get(constants.BE_MAXMEM, None)
      res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,
                                                     new_disk_template)
      ispec_min = ispec.copy()
      ispec_min[constants.ISPEC_MEM_SIZE] = \
        self.be_new.get(constants.BE_MINMEM, None)
      res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,
                                                     new_disk_template)

      if (res_max or res_min):
        # FIXME: Improve error message by including information about whether
        # the upper or lower limit of the parameter fails the ipolicy.
        msg = ("Instance allocation to group %s (%s) violates policy: %s" %
               (group_info, group_info.name,
                utils.CommaJoin(set(res_max + res_min))))
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

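  # Readability note for the two conversion helpers below (the values of
  # _DISK_CONVERSIONS): plain -> drbd creates the missing meta and secondary
  # volumes, renames the existing LVs into place as DRBD children and then
  # builds the DRBD devices on top of them; drbd -> plain keeps each disk's
  # data LV (children[0]) on the primary node and removes the secondary-node
  # volumes and the metadata LVs.  This summary is descriptive only; the code
  # below is authoritative.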
  def _ConvertPlainToDrbd(self, feedback_fn):
    """Converts an instance from plain to drbd.

    """
    feedback_fn("Converting template to drbd")
    pnode_uuid = self.instance.primary_node
    snode_uuid = self.op.remote_node_uuid

    assert self.instance.disk_template == constants.DT_PLAIN

    # create a fake disk info for _GenerateDiskTemplate
    disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
                  constants.IDISK_VG: d.logical_id[0],
                  constants.IDISK_NAME: d.name}
                 for d in self.instance.disks]
    new_disks = GenerateDiskTemplate(self, self.op.disk_template,
                                     self.instance.uuid, pnode_uuid,
                                     [snode_uuid], disk_info, None, None, 0,
                                     feedback_fn, self.diskparams)
    anno_disks = rpc.AnnotateDiskParams(new_disks, self.diskparams)
    p_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, pnode_uuid)
    s_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, snode_uuid)
    info = GetInstanceInfoText(self.instance)
    feedback_fn("Creating additional volumes...")
    # first, create the missing data and meta devices
    for disk in anno_disks:
      # unfortunately this is... not too nice
      CreateSingleBlockDev(self, pnode_uuid, self.instance, disk.children[1],
                           info, True, p_excl_stor)
      for child in disk.children:
        CreateSingleBlockDev(self, snode_uuid, self.instance, child, info, True,
                             s_excl_stor)
    # at this stage, all new LVs have been created, we can rename the
    # old ones
    feedback_fn("Renaming original volumes...")
    rename_list = [(o, n.children[0].logical_id)
                   for (o, n) in zip(self.instance.disks, new_disks)]
    result = self.rpc.call_blockdev_rename(pnode_uuid, rename_list)
    result.Raise("Failed to rename original LVs")

    feedback_fn("Initializing DRBD devices...")
    # all child devices are in place, we can now create the DRBD devices
    try:
      for disk in anno_disks:
        for (node_uuid, excl_stor) in [(pnode_uuid, p_excl_stor),
                                       (snode_uuid, s_excl_stor)]:
          f_create = node_uuid == pnode_uuid
          CreateSingleBlockDev(self, node_uuid, self.instance, disk, info,
                               f_create, excl_stor)
    except errors.GenericError, e:
      feedback_fn("Initializing of DRBD devices failed;"
                  " renaming back original volumes...")
      rename_back_list = [(n.children[0], o.logical_id)
                          for (n, o) in zip(new_disks, self.instance.disks)]
      result = self.rpc.call_blockdev_rename(pnode_uuid, rename_back_list)
      result.Raise("Failed to rename LVs back after error %s" % str(e))
      raise

    # at this point, the instance has been modified
    self.instance.disk_template = constants.DT_DRBD8
    self.instance.disks = new_disks
    self.cfg.Update(self.instance, feedback_fn)

    # Release node locks while waiting for sync
    ReleaseLocks(self, locking.LEVEL_NODE)

    # disks are created, waiting for sync
    disk_abort = not WaitForSync(self, self.instance,
                                 oneshot=not self.op.wait_for_sync)
    if disk_abort:
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance, please cleanup manually")

    # Node resource locks will be released by caller

  def _ConvertDrbdToPlain(self, feedback_fn):
    """Converts an instance from drbd to plain.

    """
    assert len(self.instance.secondary_nodes) == 1
    assert self.instance.disk_template == constants.DT_DRBD8

    pnode_uuid = self.instance.primary_node
    snode_uuid = self.instance.secondary_nodes[0]
    feedback_fn("Converting template to plain")

    old_disks = AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
    new_disks = [d.children[0] for d in self.instance.disks]

    # copy over size, mode and name
    for parent, child in zip(old_disks, new_disks):
      child.size = parent.size
      child.mode = parent.mode
      child.name = parent.name

    # this is a DRBD disk, return its port to the pool
    # NOTE: this must be done right before the call to cfg.Update!
    for disk in old_disks:
      tcp_port = disk.logical_id[2]
      self.cfg.AddTcpUdpPort(tcp_port)

    # update instance structure
    self.instance.disks = new_disks
    self.instance.disk_template = constants.DT_PLAIN
    _UpdateIvNames(0, self.instance.disks)
    self.cfg.Update(self.instance, feedback_fn)

    # Release locks in case removing disks takes a while
    ReleaseLocks(self, locking.LEVEL_NODE)

    feedback_fn("Removing volumes on the secondary node...")
    for disk in old_disks:
      result = self.rpc.call_blockdev_remove(snode_uuid, (disk, self.instance))
      result.Warn("Could not remove block device %s on node %s,"
                  " continuing anyway" %
                  (disk.iv_name, self.cfg.GetNodeName(snode_uuid)),
                  self.LogWarning)

    feedback_fn("Removing unneeded volumes on the primary node...")
    for idx, disk in enumerate(old_disks):
      meta = disk.children[1]
      result = self.rpc.call_blockdev_remove(pnode_uuid, (meta, self.instance))
      result.Warn("Could not remove metadata for disk %d on node %s,"
                  " continuing anyway" %
                  (idx, self.cfg.GetNodeName(pnode_uuid)),
                  self.LogWarning)

  def _HotplugDevice(self, action, dev_type, device, extra, seq):
    self.LogInfo("Trying to hotplug device...")
    msg = "hotplug:"
    result = self.rpc.call_hotplug_device(self.instance.primary_node,
                                          self.instance, action, dev_type,
                                          (device, self.instance),
                                          extra, seq)
    if result.fail_msg:
      self.LogWarning("Could not hotplug device: %s" % result.fail_msg)
      self.LogInfo("Continuing execution..")
      msg += "failed"
    else:
      self.LogInfo("Hotplug done.")
      msg += "done"
    return msg

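  # Readability note: the _CreateNew*/_Modify*/_Apply*/_Remove* callbacks
  # below are driven by _ApplyContainerMods.  They report their work as
  # (label, value) pairs such as ("disk/0", "add:size=1024,mode=rw"), which
  # are collected into the LU result; creation callbacks additionally return
  # the newly built object as the first element of their result tuple.  The
  # concrete example values are illustrative only.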
  def _CreateNewDisk(self, idx, params, _):
    """Creates a new disk.

    """
    # add a new disk
    if self.instance.disk_template in constants.DTS_FILEBASED:
      (file_driver, file_path) = self.instance.disks[0].logical_id
      file_path = os.path.dirname(file_path)
    else:
      file_driver = file_path = None

    disk = \
      GenerateDiskTemplate(self, self.instance.disk_template,
                           self.instance.uuid, self.instance.primary_node,
                           self.instance.secondary_nodes, [params], file_path,
                           file_driver, idx, self.Log, self.diskparams)[0]

    new_disks = CreateDisks(self, self.instance, disks=[disk])

    if self.cluster.prealloc_wipe_disks:
      # Wipe new disk
      WipeOrCleanupDisks(self, self.instance,
                         disks=[(idx, disk, 0)],
                         cleanup=new_disks)

    changes = [
      ("disk/%d" % idx,
       "add:size=%s,mode=%s" % (disk.size, disk.mode)),
      ]
    if self.op.hotplug:
      result = self.rpc.call_blockdev_assemble(self.instance.primary_node,
                                               (disk, self.instance),
                                               self.instance.name, True, idx)
      if result.fail_msg:
        changes.append(("disk/%d" % idx, "assemble:failed"))
        self.LogWarning("Can't assemble newly created disk %d: %s",
                        idx, result.fail_msg)
      else:
        _, link_name = result.payload
        msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
                                  constants.HOTPLUG_TARGET_DISK,
                                  disk, link_name, idx)
        changes.append(("disk/%d" % idx, msg))

    return (disk, changes)

  def _PostAddDisk(self, _, disk):
    if not WaitForSync(self, self.instance, disks=[disk],
                       oneshot=not self.op.wait_for_sync):
      raise errors.OpExecError("Failed to sync disks of %s" %
                               self.instance.name)

    # the disk is active at this point, so deactivate it if the instance disks
    # are supposed to be inactive
    if not self.instance.disks_active:
      ShutdownInstanceDisks(self, self.instance, disks=[disk])

  @staticmethod
  def _ModifyDisk(idx, disk, params, _):
    """Modifies a disk.

    """
    changes = []
    mode = params.get(constants.IDISK_MODE, None)
    if mode:
      disk.mode = mode
      changes.append(("disk.mode/%d" % idx, disk.mode))

    name = params.get(constants.IDISK_NAME, None)
    disk.name = name
    changes.append(("disk.name/%d" % idx, disk.name))

    return changes

  def _RemoveDisk(self, idx, root, _):
    """Removes a disk.

    """
    hotmsg = ""
    if self.op.hotplug:
      hotmsg = self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
                                   constants.HOTPLUG_TARGET_DISK,
                                   root, None, idx)
      ShutdownInstanceDisks(self, self.instance, [root])

    (anno_disk,) = AnnotateDiskParams(self.instance, [root], self.cfg)
    for node_uuid, disk in anno_disk.ComputeNodeTree(
                             self.instance.primary_node):
      msg = self.rpc.call_blockdev_remove(node_uuid, (disk, self.instance)) \
              .fail_msg
      if msg:
        self.LogWarning("Could not remove disk/%d on node '%s': %s,"
                        " continuing anyway", idx,
                        self.cfg.GetNodeName(node_uuid), msg)

    # if this is a DRBD disk, return its port to the pool
    if root.dev_type in constants.DTS_DRBD:
      self.cfg.AddTcpUdpPort(root.logical_id[2])

    return hotmsg

  def _CreateNewNic(self, idx, params, private):
    """Creates data structure for a new network interface.

    """
    mac = params[constants.INIC_MAC]
    ip = params.get(constants.INIC_IP, None)
    net = params.get(constants.INIC_NETWORK, None)
    name = params.get(constants.INIC_NAME, None)
    net_uuid = self.cfg.LookupNetwork(net)
    #TODO: not private.filled?? can a nic have no nicparams??
    nicparams = private.filled
    nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name,
                       nicparams=nicparams)
    nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())

    changes = [
      ("nic.%d" % idx,
       "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
       (mac, ip, private.filled[constants.NIC_MODE],
       private.filled[constants.NIC_LINK], net)),
      ]

    if self.op.hotplug:
      msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
                                constants.HOTPLUG_TARGET_NIC,
                                nobj, None, idx)
      changes.append(("nic.%d" % idx, msg))

    return (nobj, changes)

  def _ApplyNicMods(self, idx, nic, params, private):
    """Modifies a network interface.

    """
    changes = []

    for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:
      if key in params:
        changes.append(("nic.%s/%d" % (key, idx), params[key]))
        setattr(nic, key, params[key])

    new_net = params.get(constants.INIC_NETWORK, nic.network)
    new_net_uuid = self.cfg.LookupNetwork(new_net)
    if new_net_uuid != nic.network:
      changes.append(("nic.network/%d" % idx, new_net))
      nic.network = new_net_uuid

    if private.filled:
      nic.nicparams = private.filled

      for (key, val) in nic.nicparams.items():
        changes.append(("nic.%s/%d" % (key, idx), val))

    if self.op.hotplug:
      msg = self._HotplugDevice(constants.HOTPLUG_ACTION_MODIFY,
                                constants.HOTPLUG_TARGET_NIC,
                                nic, None, idx)
      changes.append(("nic/%d" % idx, msg))

    return changes

  def _RemoveNic(self, idx, nic, _):
    if self.op.hotplug:
      return self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
                                 constants.HOTPLUG_TARGET_NIC,
                                 nic, None, idx)

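  # Readability note: Exec() below applies what CheckPrereq validated, roughly
  # in this order: primary node and runtime memory, disk modifications, the
  # optional disk template conversion, NIC changes, hvparams/beparams/OS/
  # osparams, and finally the offline/online admin state, before writing the
  # updated instance back to the configuration.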
  def Exec(self, feedback_fn):
    """Modifies an instance.

    All parameters take effect only at the next restart of the instance.

    """
    # Process here the warnings from CheckPrereq, as we don't have a
    # feedback_fn there.
    # TODO: Replace with self.LogWarning
    for warn in self.warn:
      feedback_fn("WARNING: %s" % warn)

    assert ((self.op.disk_template is None) ^
            bool(self.owned_locks(locking.LEVEL_NODE_RES))), \
      "Not owning any node resource locks"

    result = []

    # New primary node
    if self.op.pnode_uuid:
      self.instance.primary_node = self.op.pnode_uuid

    # runtime memory
    if self.op.runtime_mem:
      rpcres = self.rpc.call_instance_balloon_memory(self.instance.primary_node,
                                                     self.instance,
                                                     self.op.runtime_mem)
      rpcres.Raise("Cannot modify instance runtime memory")
      result.append(("runtime_memory", self.op.runtime_mem))

    # Apply disk changes
    _ApplyContainerMods("disk", self.instance.disks, result, self.diskmod,
                        self._CreateNewDisk, self._ModifyDisk,
                        self._RemoveDisk, post_add_fn=self._PostAddDisk)
    _UpdateIvNames(0, self.instance.disks)

    if self.op.disk_template:
      if __debug__:
        check_nodes = set(self.instance.all_nodes)
        if self.op.remote_node_uuid:
          check_nodes.add(self.op.remote_node_uuid)
        for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
          owned = self.owned_locks(level)
          assert not (check_nodes - owned), \
            ("Not owning the correct locks, owning %r, expected at least %r" %
             (owned, check_nodes))

      r_shut = ShutdownInstanceDisks(self, self.instance)
      if not r_shut:
        raise errors.OpExecError("Cannot shutdown instance disks, unable to"
                                 " proceed with disk template conversion")
      mode = (self.instance.disk_template, self.op.disk_template)
      try:
        self._DISK_CONVERSIONS[mode](self, feedback_fn)
      except:
        self.cfg.ReleaseDRBDMinors(self.instance.uuid)
        raise
      result.append(("disk_template", self.op.disk_template))

      assert self.instance.disk_template == self.op.disk_template, \
        ("Expected disk template '%s', found '%s'" %
         (self.op.disk_template, self.instance.disk_template))

    # Release node and resource locks if there are any (they might already have
    # been released during disk conversion)
    ReleaseLocks(self, locking.LEVEL_NODE)
    ReleaseLocks(self, locking.LEVEL_NODE_RES)

    # Apply NIC changes
    if self._new_nics is not None:
      self.instance.nics = self._new_nics
      result.extend(self._nic_chgdesc)

    # hvparams changes
    if self.op.hvparams:
      self.instance.hvparams = self.hv_inst
      for key, val in self.op.hvparams.iteritems():
        result.append(("hv/%s" % key, val))

    # beparams changes
    if self.op.beparams:
      self.instance.beparams = self.be_inst
      for key, val in self.op.beparams.iteritems():
        result.append(("be/%s" % key, val))

    # OS change
    if self.op.os_name:
      self.instance.os = self.op.os_name

    # osparams changes
    if self.op.osparams:
      self.instance.osparams = self.os_inst
      for key, val in self.op.osparams.iteritems():
        result.append(("os/%s" % key, val))

    if self.op.offline is None:
      # Ignore
      pass
    elif self.op.offline:
      # Mark instance as offline
      self.cfg.MarkInstanceOffline(self.instance.uuid)
      result.append(("admin_state", constants.ADMINST_OFFLINE))
    else:
      # Mark instance as online, but stopped
      self.cfg.MarkInstanceDown(self.instance.uuid)
      result.append(("admin_state", constants.ADMINST_DOWN))

    self.cfg.Update(self.instance, feedback_fn, self.proc.GetECId())

    assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
                self.owned_locks(locking.LEVEL_NODE)), \
      "All node locks should have been released by now"

    return result

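  # Supported disk template conversions: Exec() looks up the tuple
  # (current_template, requested_template) here and invokes the matching
  # helper as self._DISK_CONVERSIONS[mode](self, feedback_fn), while
  # _PreCheckDiskTemplate() rejects any pair that is not listed.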
  _DISK_CONVERSIONS = {
    (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
    (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
    }


class LUInstanceChangeGroup(LogicalUnit):
  HPATH = "instance-change-group"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self.share_locks = ShareAll()

    self.needed_locks = {
      locking.LEVEL_NODEGROUP: [],
      locking.LEVEL_NODE: [],
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
      }

    self._ExpandAndLockInstance()

    if self.op.target_groups:
      self.req_target_uuids = map(self.cfg.LookupNodeGroup,
                                  self.op.target_groups)
    else:
      self.req_target_uuids = None

    self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]

      if self.req_target_uuids:
        lock_groups = set(self.req_target_uuids)

        # Lock all groups used by instance optimistically; this requires going
        # via the node before it's locked, requiring verification later on
        instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
        lock_groups.update(instance_groups)
      else:
        # No target groups, need to lock all of them
        lock_groups = locking.ALL_SET

      self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups

    elif level == locking.LEVEL_NODE:
      if self.req_target_uuids:
        # Lock all nodes used by instances
        self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
        self._LockInstancesNodes()

        # Lock all nodes in all potential target groups
        lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
                       self.cfg.GetInstanceNodeGroups(self.op.instance_uuid))
        member_nodes = [node_uuid
                        for group in lock_groups
                        for node_uuid in self.cfg.GetNodeGroup(group).members]
        self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
      else:
        # Lock all nodes as all groups are potential targets
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    owned_instance_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))

    assert (self.req_target_uuids is None or
            owned_groups.issuperset(self.req_target_uuids))
    assert owned_instance_names == set([self.op.instance_name])

    # Get instance information
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)

    # Check if node groups for locked instance are still correct
    assert owned_nodes.issuperset(self.instance.all_nodes), \
      ("Instance %s's nodes changed while we kept the lock" %
       self.op.instance_name)

    inst_groups = CheckInstanceNodeGroups(self.cfg, self.op.instance_uuid,
                                          owned_groups)

    if self.req_target_uuids:
      # User requested specific target groups
      self.target_uuids = frozenset(self.req_target_uuids)
    else:
      # All groups except those used by the instance are potential targets
      self.target_uuids = owned_groups - inst_groups

    conflicting_groups = self.target_uuids & inst_groups
    if conflicting_groups:
      raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
                                 " used by the instance '%s'" %
                                 (utils.CommaJoin(conflicting_groups),
                                  self.op.instance_name),
                                 errors.ECODE_INVAL)

    if not self.target_uuids:
      raise errors.OpPrereqError("There are no possible target groups",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    assert self.target_uuids

    env = {
      "TARGET_GROUPS": " ".join(self.target_uuids),
      }

    env.update(BuildInstanceHookEnvByObject(self, self.instance))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()
    return ([mn], [mn])

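  # Readability note: Exec() below delegates the placement decision to the
  # configured iallocator through an IAReqGroupChange request and returns the
  # jobs derived from its answer wrapped in ResultWithJobs; the actual group
  # change is then performed by those jobs.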
  def Exec(self, feedback_fn):
    instances = list(self.owned_locks(locking.LEVEL_INSTANCE))

    assert instances == [self.op.instance_name], "Instance not locked"

    req = iallocator.IAReqGroupChange(instances=instances,
                                      target_groups=list(self.target_uuids))
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute solution for changing group of"
                                 " instance '%s' using iallocator '%s': %s" %
                                 (self.op.instance_name, self.op.iallocator,
                                  ial.info), errors.ECODE_NORES)

    jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False)

    self.LogInfo("Iallocator returned %s job(s) for changing group of"
                 " instance '%s'", len(jobs), self.op.instance_name)

    return ResultWithJobs(jobs)