#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Logical units dealing with instances."""

import OpenSSL
import copy
import logging
import os

from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import ht
from ganeti import hypervisor
from ganeti import locking
from ganeti.masterd import iallocator
from ganeti import masterd
from ganeti import netutils
from ganeti import objects
from ganeti import pathutils
from ganeti import rpc
from ganeti import utils

from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs

from ganeti.cmdlib.common import INSTANCE_DOWN, \
  INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, CheckNodeOnline, \
  ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
  LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
  IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
  AnnotateDiskParams, GetUpdatedParams, ExpandInstanceUuidAndName, \
  ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeUuidAndName, \
  CheckDiskTemplateEnabled
from ganeti.cmdlib.instance_storage import CreateDisks, \
  CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, WaitForSync, \
  IsExclusiveStorageEnabledNodeUuid, CreateSingleBlockDev, ComputeDisks, \
  CheckRADOSFreeSpace, ComputeDiskSizePerVG, GenerateDiskTemplate, \
  StartInstanceDisks, ShutdownInstanceDisks, AssembleInstanceDisks, \
  CheckSpindlesExclusiveStorage
from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
  GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
  NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
  ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
  GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
  CheckInstanceBridgesExist, CheckNicsBridgesExist, CheckNodeHasOS

import ganeti.masterd.instance


#: Type description for changes as returned by L{_ApplyContainerMods}'s
#: callbacks
_TApplyContModsCbChanges = \
  ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
    ht.TNonEmptyString,
    ht.TAny,
    ])))


def _CheckHostnameSane(lu, name):
  """Ensures that a given hostname resolves to a 'sane' name.

  The given name is required to be a prefix of the resolved hostname,
  to prevent accidental mismatches.

  @param lu: the logical unit on behalf of which we're checking
  @param name: the name we should resolve and check
  @return: the resolved hostname object

  """
  hostname = netutils.GetHostname(name=name)
  if hostname.name != name:
    lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name)
  if not utils.MatchNameComponent(name, [hostname.name]):
    raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
                                " same as given hostname '%s'") %
                               (hostname.name, name), errors.ECODE_INVAL)
  return hostname
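# Usage sketch (added commentary, not part of the original module): the check
# only requires the given name to be a prefix component of the resolved FQDN.
# For a resolver that maps "web1" to "web1.example.com":
#   _CheckHostnameSane(lu, "web1")              # passes, returns the Hostname
#   _CheckHostnameSane(lu, "web1.example.com")  # passes, exact match
# whereas a name resolving to an unrelated hostname raises OpPrereqError.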


def _CheckOpportunisticLocking(op):
  """Generate error if opportunistic locking is not possible.

  """
  if op.opportunistic_locking and not op.iallocator:
    raise errors.OpPrereqError("Opportunistic locking is only available in"
                               " combination with an instance allocator",
                               errors.ECODE_INVAL)


def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_name_whitelist):
  """Wrapper around IAReqInstanceAlloc.

  @param op: The instance opcode
  @param disks: The computed disks
  @param nics: The computed nics
  @param beparams: The fully filled beparams
  @param node_name_whitelist: List of nodes which should appear as online to the
    allocator (unless the node is already marked offline)

  @returns: A filled L{iallocator.IAReqInstanceAlloc}

  """
  spindle_use = beparams[constants.BE_SPINDLE_USE]
  return iallocator.IAReqInstanceAlloc(name=op.instance_name,
                                       disk_template=op.disk_template,
                                       tags=op.tags,
                                       os=op.os_type,
                                       vcpus=beparams[constants.BE_VCPUS],
                                       memory=beparams[constants.BE_MAXMEM],
                                       spindle_use=spindle_use,
                                       disks=disks,
                                       nics=[n.ToDict() for n in nics],
                                       hypervisor=op.hypervisor,
                                       node_whitelist=node_name_whitelist)


def _ComputeFullBeParams(op, cluster):
  """Computes the full beparams.

  @param op: The instance opcode
  @param cluster: The cluster config object

  @return: The fully filled beparams

  """
  default_beparams = cluster.beparams[constants.PP_DEFAULT]
  for param, value in op.beparams.iteritems():
    if value == constants.VALUE_AUTO:
      op.beparams[param] = default_beparams[param]
  objects.UpgradeBeParams(op.beparams)
  utils.ForceDictType(op.beparams, constants.BES_PARAMETER_TYPES)
  return cluster.SimpleFillBE(op.beparams)
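# Example sketch (hypothetical values, added commentary): an opcode carrying
#   op.beparams = {constants.BE_MAXMEM: constants.VALUE_AUTO}
# first gets the cluster default substituted for the "auto" value, and
# SimpleFillBE() then merges in the remaining cluster-level defaults, so the
# returned dict is a complete beparams dictionary.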


def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
  """Computes the nics.

  @param op: The instance opcode
  @param cluster: Cluster configuration object
  @param default_ip: The default ip to assign
  @param cfg: An instance of the configuration object
  @param ec_id: Execution context ID

  @returns: The built up nics

  """
  nics = []
  for nic in op.nics:
    nic_mode_req = nic.get(constants.INIC_MODE, None)
    nic_mode = nic_mode_req
    if nic_mode is None or nic_mode == constants.VALUE_AUTO:
      nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]

    net = nic.get(constants.INIC_NETWORK, None)
    link = nic.get(constants.NIC_LINK, None)
    ip = nic.get(constants.INIC_IP, None)
    vlan = nic.get(constants.INIC_VLAN, None)

    if net is None or net.lower() == constants.VALUE_NONE:
      net = None
    else:
      if nic_mode_req is not None or link is not None:
        raise errors.OpPrereqError("If network is given, no mode or link"
                                   " is allowed to be passed",
                                   errors.ECODE_INVAL)

    if vlan is not None and nic_mode != constants.NIC_MODE_OVS:
      raise errors.OpPrereqError("VLAN is given, but network mode is not"
                                 " openvswitch", errors.ECODE_INVAL)

    # ip validity checks
    if ip is None or ip.lower() == constants.VALUE_NONE:
      nic_ip = None
    elif ip.lower() == constants.VALUE_AUTO:
      if not op.name_check:
        raise errors.OpPrereqError("IP address set to auto but name checks"
                                   " have been skipped",
                                   errors.ECODE_INVAL)
      nic_ip = default_ip
    else:
      # We defer pool operations until later, so that the iallocator has
      # filled in the instance's node(s)
      if ip.lower() == constants.NIC_IP_POOL:
        if net is None:
          raise errors.OpPrereqError("if ip=pool, parameter network"
                                     " must be passed too",
                                     errors.ECODE_INVAL)

      elif not netutils.IPAddress.IsValid(ip):
        raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                   errors.ECODE_INVAL)

      nic_ip = ip

    # TODO: check the ip address for uniqueness
    if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
      raise errors.OpPrereqError("Routed nic mode requires an ip address",
                                 errors.ECODE_INVAL)

    # MAC address verification
    mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
    if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
      mac = utils.NormalizeAndValidateMac(mac)

      try:
        # TODO: We need to factor this out
        cfg.ReserveMAC(mac, ec_id)
      except errors.ReservationError:
        raise errors.OpPrereqError("MAC address %s already in use"
                                   " in cluster" % mac,
                                   errors.ECODE_NOTUNIQUE)

    #  Build nic parameters
    nicparams = {}
    if nic_mode_req:
      nicparams[constants.NIC_MODE] = nic_mode
    if link:
      nicparams[constants.NIC_LINK] = link
    if vlan:
      nicparams[constants.NIC_VLAN] = vlan

    check_params = cluster.SimpleFillNIC(nicparams)
    objects.NIC.CheckParameterSyntax(check_params)
    net_uuid = cfg.LookupNetwork(net)
    name = nic.get(constants.INIC_NAME, None)
    if name is not None and name.lower() == constants.VALUE_NONE:
      name = None
    nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name,
                          network=net_uuid, nicparams=nicparams)
    nic_obj.uuid = cfg.GenerateUniqueID(ec_id)
    nics.append(nic_obj)

  return nics
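# Input sketch (hypothetical opcode values, added commentary): each entry of
# op.nics is a dict such as
#   {constants.INIC_NETWORK: "net1", constants.INIC_IP: constants.NIC_IP_POOL}
#   {constants.INIC_MODE: constants.NIC_MODE_ROUTED,
#    constants.INIC_IP: "198.51.100.10"}
#   {constants.INIC_MAC: constants.VALUE_AUTO}
# Pool IPs are only resolved later (in CheckPrereq), mode/link may not be
# combined with a network, and routed NICs must carry an IP address.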


def _CheckForConflictingIp(lu, ip, node_uuid):
  """In case of conflicting IP address raise error.

  @type ip: string
  @param ip: IP address
  @type node_uuid: string
  @param node_uuid: node UUID

  """
  (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node_uuid)
  if conf_net is not None:
    raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
                                " network %s, but the target NIC does not." %
                                (ip, conf_net)),
                               errors.ECODE_STATE)

  return (None, None)


def _ComputeIPolicyInstanceSpecViolation(
  ipolicy, instance_spec, disk_template,
  _compute_fn=ComputeIPolicySpecViolation):
  """Compute if instance specs meet the specs of the ipolicy.

  @type ipolicy: dict
  @param ipolicy: The ipolicy to verify against
  @type instance_spec: dict
  @param instance_spec: The instance spec to verify
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{ComputeIPolicySpecViolation}

  """
  mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
  cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
  disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
  disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
  nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
  spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)

  return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
                     disk_sizes, spindle_use, disk_template)
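# Shape sketch (added commentary): instance_spec is expected to look like the
# ispec dict built in LUInstanceCreate.CheckPrereq, e.g. (values hypothetical)
#   {constants.ISPEC_MEM_SIZE: 1024, constants.ISPEC_CPU_COUNT: 2,
#    constants.ISPEC_DISK_COUNT: 1, constants.ISPEC_DISK_SIZE: [10240],
#    constants.ISPEC_NIC_COUNT: 1, constants.ISPEC_SPINDLE_USE: 1}
# Missing keys simply fall back to the .get() defaults above.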


def _CheckOSVariant(os_obj, name):
  """Check whether an OS name conforms to the os variants specification.

  @type os_obj: L{objects.OS}
  @param os_obj: OS object to check
  @type name: string
  @param name: OS name passed by the user, to check for validity

  """
  variant = objects.OS.GetVariant(name)
  if not os_obj.supported_variants:
    if variant:
      raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
                                 " passed)" % (os_obj.name, variant),
                                 errors.ECODE_INVAL)
    return
  if not variant:
    raise errors.OpPrereqError("OS name must include a variant",
                               errors.ECODE_INVAL)

  if variant not in os_obj.supported_variants:
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)


class LUInstanceCreate(LogicalUnit):
  """Create an instance.

  """
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def _CheckDiskTemplateValid(self):
    """Checks validity of disk template.

    """
    cluster = self.cfg.GetClusterInfo()
    if self.op.disk_template is None:
      # FIXME: It would be better to take the default disk template from the
      # ipolicy, but for the ipolicy we need the primary node, which we get from
      # the iallocator, which wants the disk template as input. To solve this
      # chicken-and-egg problem, it should be possible to specify just a node
      # group from the iallocator and take the ipolicy from that.
      self.op.disk_template = cluster.enabled_disk_templates[0]
    CheckDiskTemplateEnabled(cluster, self.op.disk_template)

  def _CheckDiskArguments(self):
    """Checks validity of disk-related arguments.

    """
    # check that disk's names are unique and valid
    utils.ValidateDeviceNames("disk", self.op.disks)

    self._CheckDiskTemplateValid()

    # check disks' parameter names and consistent adopt/no-adopt strategy
    has_adopt = has_no_adopt = False
    for disk in self.op.disks:
      if self.op.disk_template != constants.DT_EXT:
        utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
      if constants.IDISK_ADOPT in disk:
        has_adopt = True
      else:
        has_no_adopt = True
    if has_adopt and has_no_adopt:
      raise errors.OpPrereqError("Either all disks are adopted or none is",
                                 errors.ECODE_INVAL)
    if has_adopt:
      if self.op.disk_template not in constants.DTS_MAY_ADOPT:
        raise errors.OpPrereqError("Disk adoption is not supported for the"
                                   " '%s' disk template" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)
      if self.op.iallocator is not None:
        raise errors.OpPrereqError("Disk adoption not allowed with an"
                                   " iallocator script", errors.ECODE_INVAL)
      if self.op.mode == constants.INSTANCE_IMPORT:
        raise errors.OpPrereqError("Disk adoption not allowed for"
                                   " instance import", errors.ECODE_INVAL)
    else:
      if self.op.disk_template in constants.DTS_MUST_ADOPT:
        raise errors.OpPrereqError("Disk template %s requires disk adoption,"
                                   " but no 'adopt' parameter given" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)

    self.adopt_disks = has_adopt

  def _CheckVLANArguments(self):
    """Check validity of VLANs if given

    """
    for nic in self.op.nics:
      vlan = nic.get(constants.INIC_VLAN, None)
      if vlan:
        if vlan[0] == ".":
          # vlan starting with dot means single untagged vlan,
          # might be followed by trunk (:)
          if not vlan[1:].isdigit():
            vlanlist = vlan[1:].split(':')
            for vl in vlanlist:
              if not vl.isdigit():
                raise errors.OpPrereqError("Specified VLAN parameter is "
                                           "invalid : %s" % vlan,
                                             errors.ECODE_INVAL)
        elif vlan[0] == ":":
          # Trunk - tagged only
          vlanlist = vlan[1:].split(':')
          for vl in vlanlist:
            if not vl.isdigit():
              raise errors.OpPrereqError("Specified VLAN parameter is invalid"
                                           " : %s" % vlan, errors.ECODE_INVAL)
        elif vlan.isdigit():
          # This is the simplest case. No dots, only single digit
          # -> Create untagged access port, dot needs to be added
          nic[constants.INIC_VLAN] = "." + vlan
        else:
          raise errors.OpPrereqError("Specified VLAN parameter is invalid"
                                       " : %s" % vlan, errors.ECODE_INVAL)

  def CheckArguments(self):
    """Check arguments.

    """
    # do not require name_check to ease forward/backward compatibility
    # for tools
    if self.op.no_install and self.op.start:
      self.LogInfo("No-installation mode selected, disabling startup")
      self.op.start = False
    # validate/normalize the instance name
    self.op.instance_name = \
      netutils.Hostname.GetNormalizedName(self.op.instance_name)

    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("Cannot do IP address check without a name"
                                 " check", errors.ECODE_INVAL)

    # check nics' parameter names
    for nic in self.op.nics:
      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
    # check that NIC's parameters names are unique and valid
    utils.ValidateDeviceNames("NIC", self.op.nics)

    self._CheckVLANArguments()

    self._CheckDiskArguments()
    assert self.op.disk_template is not None

    # instance name verification
    if self.op.name_check:
      self.hostname = _CheckHostnameSane(self, self.op.instance_name)
      self.op.instance_name = self.hostname.name
      # used in CheckPrereq for ip ping check
      self.check_ip = self.hostname.ip
    else:
      self.check_ip = None

    # file storage checks
    if (self.op.file_driver and
        not self.op.file_driver in constants.FILE_DRIVER):
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                 self.op.file_driver, errors.ECODE_INVAL)

    # set default file_driver if unset and required
    if (not self.op.file_driver and
        self.op.disk_template in [constants.DT_FILE,
                                  constants.DT_SHARED_FILE]):
      self.op.file_driver = constants.FD_LOOP

    ### Node/iallocator related checks
    CheckIAllocatorOrNode(self, "iallocator", "pnode")

    if self.op.pnode is not None:
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        if self.op.snode is None:
          raise errors.OpPrereqError("The networked disk templates need"
                                     " a mirror node", errors.ECODE_INVAL)
      elif self.op.snode:
        self.LogWarning("Secondary node will be ignored on non-mirrored disk"
                        " template")
        self.op.snode = None

    _CheckOpportunisticLocking(self.op)

    if self.op.mode == constants.INSTANCE_IMPORT:
      # On import force_variant must be True, because if we forced it at
      # initial install, our only chance when importing it back is that it
      # works again!
      self.op.force_variant = True

      if self.op.no_install:
        self.LogInfo("No-installation mode has no effect during import")

    elif self.op.mode == constants.INSTANCE_CREATE:
      if self.op.os_type is None:
        raise errors.OpPrereqError("No guest OS specified",
                                   errors.ECODE_INVAL)
      if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
        raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
                                   " installation" % self.op.os_type,
                                   errors.ECODE_STATE)
    elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
      self._cds = GetClusterDomainSecret()

      # Check handshake to ensure both clusters have the same domain secret
      src_handshake = self.op.source_handshake
      if not src_handshake:
        raise errors.OpPrereqError("Missing source handshake",
                                   errors.ECODE_INVAL)

      errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
                                                           src_handshake)
      if errmsg:
        raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
                                   errors.ECODE_INVAL)

      # Load and check source CA
      self.source_x509_ca_pem = self.op.source_x509_ca
      if not self.source_x509_ca_pem:
        raise errors.OpPrereqError("Missing source X509 CA",
                                   errors.ECODE_INVAL)

      try:
        (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
                                                    self._cds)
      except OpenSSL.crypto.Error, err:
        raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
                                   (err, ), errors.ECODE_INVAL)

      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
      if errcode is not None:
        raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
                                   errors.ECODE_INVAL)

      self.source_x509_ca = cert

      src_instance_name = self.op.source_instance_name
      if not src_instance_name:
        raise errors.OpPrereqError("Missing source instance name",
                                   errors.ECODE_INVAL)

      self.source_instance_name = \
        netutils.GetHostname(name=src_instance_name).name

    else:
      raise errors.OpPrereqError("Invalid instance creation mode %r" %
                                 self.op.mode, errors.ECODE_INVAL)

  def ExpandNames(self):
    """ExpandNames for CreateInstance.

    Figure out the right locks for instance creation.

    """
    self.needed_locks = {}

    # this is just a preventive check, but someone might still add this
    # instance in the meantime, and creation will fail at lock-add time
    if self.op.instance_name in\
      [inst.name for inst in self.cfg.GetAllInstancesInfo().values()]:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 self.op.instance_name, errors.ECODE_EXISTS)

    self.add_locks[locking.LEVEL_INSTANCE] = self.op.instance_name

    if self.op.iallocator:
      # TODO: Find a solution to not lock all nodes in the cluster, e.g. by
      # specifying a group on instance creation and then selecting nodes from
      # that group
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
      self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET

      if self.op.opportunistic_locking:
        self.opportunistic_locks[locking.LEVEL_NODE] = True
        self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
    else:
      (self.op.pnode_uuid, self.op.pnode) = \
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
      nodelist = [self.op.pnode_uuid]
      if self.op.snode is not None:
        (self.op.snode_uuid, self.op.snode) = \
          ExpandNodeUuidAndName(self.cfg, self.op.snode_uuid, self.op.snode)
        nodelist.append(self.op.snode_uuid)
      self.needed_locks[locking.LEVEL_NODE] = nodelist

    # in case of import lock the source node too
    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = self.op.src_node
      src_path = self.op.src_path

      if src_path is None:
        self.op.src_path = src_path = self.op.instance_name

      if src_node is None:
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
        self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
        self.op.src_node = None
        if os.path.isabs(src_path):
          raise errors.OpPrereqError("Importing an instance from a path"
                                     " requires a source node option",
                                     errors.ECODE_INVAL)
      else:
        (self.op.src_node_uuid, self.op.src_node) = (_, src_node) = \
          ExpandNodeUuidAndName(self.cfg, self.op.src_node_uuid, src_node)
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
          self.needed_locks[locking.LEVEL_NODE].append(self.op.src_node_uuid)
        if not os.path.isabs(src_path):
          self.op.src_path = \
            utils.PathJoin(pathutils.EXPORT_DIR, src_path)

    self.needed_locks[locking.LEVEL_NODE_RES] = \
      CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def _RunAllocator(self):
    """Run the allocator based on input opcode.

    """
    if self.op.opportunistic_locking:
      # Only consider nodes for which a lock is held
      node_name_whitelist = self.cfg.GetNodeNames(
        self.owned_locks(locking.LEVEL_NODE))
    else:
      node_name_whitelist = None

    req = _CreateInstanceAllocRequest(self.op, self.disks,
                                      self.nics, self.be_full,
                                      node_name_whitelist)
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      # When opportunistic locks are used only a temporary failure is generated
      if self.op.opportunistic_locking:
        ecode = errors.ECODE_TEMP_NORES
      else:
        ecode = errors.ECODE_NORES

      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" %
                                 (self.op.iallocator, ial.info),
                                 ecode)

    (self.op.pnode_uuid, self.op.pnode) = \
      ExpandNodeUuidAndName(self.cfg, None, ial.result[0])
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                 self.op.instance_name, self.op.iallocator,
                 utils.CommaJoin(ial.result))

    assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator"

    if req.RequiredNodes() == 2:
      (self.op.snode_uuid, self.op.snode) = \
        ExpandNodeUuidAndName(self.cfg, None, ial.result[1])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "ADD_MODE": self.op.mode,
      }
    if self.op.mode == constants.INSTANCE_IMPORT:
      env["SRC_NODE"] = self.op.src_node
      env["SRC_PATH"] = self.op.src_path
      env["SRC_IMAGES"] = self.src_images

    env.update(BuildInstanceHookEnv(
      name=self.op.instance_name,
      primary_node_name=self.op.pnode,
      secondary_node_names=self.cfg.GetNodeNames(self.secondaries),
      status=self.op.start,
      os_type=self.op.os_type,
      minmem=self.be_full[constants.BE_MINMEM],
      maxmem=self.be_full[constants.BE_MAXMEM],
      vcpus=self.be_full[constants.BE_VCPUS],
      nics=NICListToTuple(self, self.nics),
      disk_template=self.op.disk_template,
      disks=[(d[constants.IDISK_NAME], d.get("uuid", ""),
              d[constants.IDISK_SIZE], d[constants.IDISK_MODE])
             for d in self.disks],
      bep=self.be_full,
      hvp=self.hv_full,
      hypervisor_name=self.op.hypervisor,
      tags=self.op.tags,
      ))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode(), self.op.pnode_uuid] + self.secondaries
    return nl, nl

  def _ReadExportInfo(self):
    """Reads the export information from disk.

    It will override the opcode source node and path with the actual
    information, if these two were not specified before.

    @return: the export information

    """
    assert self.op.mode == constants.INSTANCE_IMPORT

    if self.op.src_node_uuid is None:
      locked_nodes = self.owned_locks(locking.LEVEL_NODE)
      exp_list = self.rpc.call_export_list(locked_nodes)
      found = False
      for node_uuid in exp_list:
        if exp_list[node_uuid].fail_msg:
          continue
        if self.op.src_path in exp_list[node_uuid].payload:
          found = True
          self.op.src_node = self.cfg.GetNodeInfo(node_uuid).name
          self.op.src_node_uuid = node_uuid
          self.op.src_path = utils.PathJoin(pathutils.EXPORT_DIR,
                                            self.op.src_path)
          break
      if not found:
        raise errors.OpPrereqError("No export found for relative path %s" %
                                   self.op.src_path, errors.ECODE_INVAL)

    CheckNodeOnline(self, self.op.src_node_uuid)
    result = self.rpc.call_export_info(self.op.src_node_uuid, self.op.src_path)
    result.Raise("No export or invalid export found in dir %s" %
                 self.op.src_path)

    export_info = objects.SerializableConfigParser.Loads(str(result.payload))
    if not export_info.has_section(constants.INISECT_EXP):
      raise errors.ProgrammerError("Corrupted export config",
                                   errors.ECODE_ENVIRON)

    ei_version = export_info.get(constants.INISECT_EXP, "version")
    if int(ei_version) != constants.EXPORT_VERSION:
      raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                 (ei_version, constants.EXPORT_VERSION),
                                 errors.ECODE_ENVIRON)
    return export_info

  def _ReadExportParams(self, einfo):
    """Use export parameters as defaults.

    In case the opcode doesn't specify (as in override) some instance
    parameters, then try to use them from the export information, if
    that declares them.

    """
    self.op.os_type = einfo.get(constants.INISECT_EXP, "os")
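    # Export layout sketch (added commentary): the export is an INI-style file
    # whose instance section may provide, among others, "disk%d_size",
    # "nic%d_mac" (plus the other NIC parameters), "tags" and "hypervisor"
    # options; the blocks below only use them for values the opcode left unset.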

    if not self.op.disks:
      disks = []
      # TODO: import the disk iv_name too
      for idx in range(constants.MAX_DISKS):
        if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
          disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
          disks.append({constants.IDISK_SIZE: disk_sz})
      self.op.disks = disks
      if not disks and self.op.disk_template != constants.DT_DISKLESS:
        raise errors.OpPrereqError("No disk info specified and the export"
                                   " is missing the disk information",
                                   errors.ECODE_INVAL)

    if not self.op.nics:
      nics = []
      for idx in range(constants.MAX_NICS):
        if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
          ndict = {}
          for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
            nic_param_name = "nic%d_%s" % (idx, name)
            if einfo.has_option(constants.INISECT_INS, nic_param_name):
              v = einfo.get(constants.INISECT_INS, nic_param_name)
              ndict[name] = v
          nics.append(ndict)
        else:
          break
      self.op.nics = nics

    if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
      self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()

    if (self.op.hypervisor is None and
        einfo.has_option(constants.INISECT_INS, "hypervisor")):
      self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")

    if einfo.has_section(constants.INISECT_HYP):
      # use the export parameters but do not override the ones
      # specified by the user
      for name, value in einfo.items(constants.INISECT_HYP):
        if name not in self.op.hvparams:
          self.op.hvparams[name] = value

    if einfo.has_section(constants.INISECT_BEP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_BEP):
        if name not in self.op.beparams:
          self.op.beparams[name] = value
        # Compatibility for the old "memory" be param
        if name == constants.BE_MEMORY:
          if constants.BE_MAXMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MAXMEM] = value
          if constants.BE_MINMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MINMEM] = value
    else:
      # try to read the parameters old style, from the main section
      for name in constants.BES_PARAMETERS:
        if (name not in self.op.beparams and
            einfo.has_option(constants.INISECT_INS, name)):
          self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)

    if einfo.has_section(constants.INISECT_OSP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_OSP):
        if name not in self.op.osparams:
          self.op.osparams[name] = value

  def _RevertToDefaults(self, cluster):
    """Revert the instance parameters to the default values.

    """
    # hvparams
    hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
    for name in self.op.hvparams.keys():
      if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
        del self.op.hvparams[name]
    # beparams
    be_defs = cluster.SimpleFillBE({})
    for name in self.op.beparams.keys():
      if name in be_defs and be_defs[name] == self.op.beparams[name]:
        del self.op.beparams[name]
    # nic params
    nic_defs = cluster.SimpleFillNIC({})
    for nic in self.op.nics:
      for name in constants.NICS_PARAMETERS:
        if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
          del nic[name]
    # osparams
    os_defs = cluster.SimpleFillOS(self.op.os_type, {})
    for name in self.op.osparams.keys():
      if name in os_defs and os_defs[name] == self.op.osparams[name]:
        del self.op.osparams[name]

  def _CalculateFileStorageDir(self):
    """Calculate final instance file storage dir.

    """
    # file storage dir calculation/check
    self.instance_file_storage_dir = None
    if self.op.disk_template in constants.DTS_FILEBASED:
      # build the full file storage dir path
      joinargs = []

      if self.op.disk_template == constants.DT_SHARED_FILE:
        get_fsd_fn = self.cfg.GetSharedFileStorageDir
      else:
        get_fsd_fn = self.cfg.GetFileStorageDir

      cfg_storagedir = get_fsd_fn()
      if not cfg_storagedir:
        raise errors.OpPrereqError("Cluster file storage dir not defined",
                                   errors.ECODE_STATE)
      joinargs.append(cfg_storagedir)

      if self.op.file_storage_dir is not None:
        joinargs.append(self.op.file_storage_dir)

      joinargs.append(self.op.instance_name)

      # pylint: disable=W0142
      self.instance_file_storage_dir = utils.PathJoin(*joinargs)
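      # Resulting layout sketch (added commentary): with a cluster storage dir
      # of e.g. "/srv/ganeti/file-storage", an optional per-instance
      # file_storage_dir of "mydir" and instance name "inst1.example.com",
      # the disks end up under
      # "/srv/ganeti/file-storage/mydir/inst1.example.com".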

  def CheckPrereq(self): # pylint: disable=R0914
    """Check prerequisites.

    """
    self._CalculateFileStorageDir()

    if self.op.mode == constants.INSTANCE_IMPORT:
      export_info = self._ReadExportInfo()
      self._ReadExportParams(export_info)
      self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
    else:
      self._old_instance_name = None

    if (not self.cfg.GetVGName() and
        self.op.disk_template not in constants.DTS_NOT_LVM):
      raise errors.OpPrereqError("Cluster does not support lvm-based"
                                 " instances", errors.ECODE_STATE)

    if (self.op.hypervisor is None or
        self.op.hypervisor == constants.VALUE_AUTO):
      self.op.hypervisor = self.cfg.GetHypervisorType()

    cluster = self.cfg.GetClusterInfo()
    enabled_hvs = cluster.enabled_hypervisors
    if self.op.hypervisor not in enabled_hvs:
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
                                 " cluster (%s)" %
                                 (self.op.hypervisor, ",".join(enabled_hvs)),
                                 errors.ECODE_STATE)

    # Check tag validity
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)

    # check hypervisor parameter syntax (locally)
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
    filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
                                      self.op.hvparams)
    hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
    hv_type.CheckParameterSyntax(filled_hvp)
    self.hv_full = filled_hvp
    # check that we don't specify global parameters on an instance
    CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
                         "instance", "cluster")

    # fill and remember the beparams dict
    self.be_full = _ComputeFullBeParams(self.op, cluster)

    # build os parameters
    self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)

    # now that hvp/bep are in final format, let's reset to defaults,
    # if told to do so
    if self.op.identify_defaults:
      self._RevertToDefaults(cluster)

    # NIC buildup
    self.nics = _ComputeNics(self.op, cluster, self.check_ip, self.cfg,
                             self.proc.GetECId())

    # disk checks/pre-build
    default_vg = self.cfg.GetVGName()
    self.disks = ComputeDisks(self.op, default_vg)

    if self.op.mode == constants.INSTANCE_IMPORT:
      disk_images = []
      for idx in range(len(self.disks)):
        option = "disk%d_dump" % idx
        if export_info.has_option(constants.INISECT_INS, option):
          # FIXME: are the old os-es, disk sizes, etc. useful?
          export_name = export_info.get(constants.INISECT_INS, option)
          image = utils.PathJoin(self.op.src_path, export_name)
          disk_images.append(image)
        else:
          disk_images.append(False)

      self.src_images = disk_images

      if self.op.instance_name == self._old_instance_name:
        for idx, nic in enumerate(self.nics):
          if nic.mac == constants.VALUE_AUTO:
            nic_mac_ini = "nic%d_mac" % idx
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)

    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT

    # ip ping checks (we use the same ip that was resolved in ExpandNames)
    if self.op.ip_check:
      if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (self.check_ip, self.op.instance_name),
                                   errors.ECODE_NOTUNIQUE)

    #### mac address generation
    # By generating here the mac address both the allocator and the hooks get
    # the real final mac address rather than the 'auto' or 'generate' value.
    # There is a race condition between the generation and the instance object
    # creation, which means that we know the mac is valid now, but we're not
    # sure it will be when we actually add the instance. If things go bad
    # adding the instance will abort because of a duplicate mac, and the
    # creation job will fail.
    for nic in self.nics:
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId())

    #### allocator run

    if self.op.iallocator is not None:
      self._RunAllocator()

    # Release all unneeded node locks
    keep_locks = filter(None, [self.op.pnode_uuid, self.op.snode_uuid,
                               self.op.src_node_uuid])
    ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES)), \
      "Node locks differ from node resource locks"

    #### node related checks

    # check primary node
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode_uuid)
    assert self.pnode is not None, \
      "Cannot retrieve locked node %s" % self.op.pnode_uuid
    if pnode.offline:
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if pnode.drained:
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if not pnode.vm_capable:
      raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
                                 " '%s'" % pnode.name, errors.ECODE_STATE)

    self.secondaries = []

    # Fill in any IPs from IP pools. This must happen here, because we need to
    # know the nic's primary node, as specified by the iallocator
    for idx, nic in enumerate(self.nics):
      net_uuid = nic.network
      if net_uuid is not None:
        nobj = self.cfg.GetNetwork(net_uuid)
        netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.uuid)
        if netparams is None:
          raise errors.OpPrereqError("No netparams found for network"
                                     " %s. Probably not connected to"
                                     " node's %s nodegroup" %
                                     (nobj.name, self.pnode.name),
                                     errors.ECODE_INVAL)
        self.LogInfo("NIC/%d inherits netparams %s" %
                     (idx, netparams.values()))
        nic.nicparams = dict(netparams)
        if nic.ip is not None:
          if nic.ip.lower() == constants.NIC_IP_POOL:
            try:
              nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
                                         " from the address pool" % idx,
                                         errors.ECODE_STATE)
            self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name)
          else:
            try:
              self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("IP address %s already in use"
                                         " or does not belong to network %s" %
                                         (nic.ip, nobj.name),
                                         errors.ECODE_NOTUNIQUE)

      # net is None, ip None or given
      elif self.op.conflicts_check:
        _CheckForConflictingIp(self, nic.ip, self.pnode.uuid)

    # mirror node verification
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.snode_uuid == pnode.uuid:
        raise errors.OpPrereqError("The secondary node cannot be the"
                                   " primary node", errors.ECODE_INVAL)
      CheckNodeOnline(self, self.op.snode_uuid)
      CheckNodeNotDrained(self, self.op.snode_uuid)
      CheckNodeVmCapable(self, self.op.snode_uuid)
      self.secondaries.append(self.op.snode_uuid)

      snode = self.cfg.GetNodeInfo(self.op.snode_uuid)
      if pnode.group != snode.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    nodes = [pnode]
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      nodes.append(snode)
    has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
    excl_stor = compat.any(map(has_es, nodes))
    if excl_stor and not self.op.disk_template in constants.DTS_EXCL_STORAGE:
      raise errors.OpPrereqError("Disk template %s not supported with"
                                 " exclusive storage" % self.op.disk_template,
                                 errors.ECODE_STATE)
    for disk in self.disks:
      CheckSpindlesExclusiveStorage(disk, excl_stor, True)

    node_uuids = [pnode.uuid] + self.secondaries

    if not self.adopt_disks:
      if self.op.disk_template == constants.DT_RBD:
        # _CheckRADOSFreeSpace() is just a placeholder.
        # Any function that checks prerequisites can be placed here.
        # Check if there is enough space on the RADOS cluster.
        CheckRADOSFreeSpace()
      elif self.op.disk_template == constants.DT_EXT:
        # FIXME: Function that checks prereqs if needed
        pass
      elif self.op.disk_template in utils.GetLvmDiskTemplates():
        # Check lv size requirements, if not adopting
        req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
        CheckNodesFreeDiskPerVG(self, node_uuids, req_sizes)
      else:
        # FIXME: add checks for other, non-adopting, non-lvm disk templates
        pass

    elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
      all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
                                disk[constants.IDISK_ADOPT])
                     for disk in self.disks])
      if len(all_lvs) != len(self.disks):
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
                                   errors.ECODE_INVAL)
      for lv_name in all_lvs:
        try:
          # FIXME: lv_name here is "vg/lv" need to ensure that other calls
          # to ReserveLV uses the same syntax
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("LV named %s used by another instance" %
                                     lv_name, errors.ECODE_NOTUNIQUE)

      vg_names = self.rpc.call_vg_list([pnode.uuid])[pnode.uuid]
      vg_names.Raise("Cannot get VG information from node %s" % pnode.name)

      node_lvs = self.rpc.call_lv_list([pnode.uuid],
                                       vg_names.payload.keys())[pnode.uuid]
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
      node_lvs = node_lvs.payload

      delta = all_lvs.difference(node_lvs.keys())
      if delta:
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
      if online_lvs:
        raise errors.OpPrereqError("Online logical volumes found, cannot"
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
                                   errors.ECODE_STATE)
      # update the size of disk based on what is found
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
                                        dsk[constants.IDISK_ADOPT])][0]))

    elif self.op.disk_template == constants.DT_BLOCK:
      # Normalize and de-duplicate device paths
      all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
                       for disk in self.disks])
      if len(all_disks) != len(self.disks):
        raise errors.OpPrereqError("Duplicate disk names given for adoption",
                                   errors.ECODE_INVAL)
      baddisks = [d for d in all_disks
                  if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
      if baddisks:
        raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
                                   " cannot be adopted" %
                                   (utils.CommaJoin(baddisks),
                                    constants.ADOPTABLE_BLOCKDEV_ROOT),
                                   errors.ECODE_INVAL)

      node_disks = self.rpc.call_bdev_sizes([pnode.uuid],
                                            list(all_disks))[pnode.uuid]
      node_disks.Raise("Cannot get block device information from node %s" %
                       pnode.name)
      node_disks = node_disks.payload
      delta = all_disks.difference(node_disks.keys())
      if delta:
        raise errors.OpPrereqError("Missing block device(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_disks[dsk[constants.IDISK_ADOPT]]))

    # Verify instance specs
    spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
    ispec = {
      constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
      constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
      constants.ISPEC_DISK_COUNT: len(self.disks),
      constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE]
                                  for disk in self.disks],
      constants.ISPEC_NIC_COUNT: len(self.nics),
      constants.ISPEC_SPINDLE_USE: spindle_use,
      }

    group_info = self.cfg.GetNodeGroup(pnode.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
    res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
                                               self.op.disk_template)
    if not self.op.ignore_ipolicy and res:
      msg = ("Instance allocation to group %s (%s) violates policy: %s" %
             (pnode.group, group_info.name, utils.CommaJoin(res)))
      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

    CheckHVParams(self, node_uuids, self.op.hypervisor, self.op.hvparams)

    CheckNodeHasOS(self, pnode.uuid, self.op.os_type, self.op.force_variant)
    # check OS parameters (remotely)
    CheckOSParams(self, True, node_uuids, self.op.os_type, self.os_full)

    CheckNicsBridgesExist(self, self.nics, self.pnode.uuid)

    #TODO: _CheckExtParams (remotely)
    # Check parameters for extstorage

    # memory check on primary node
    #TODO(dynmem): use MINMEM for checking
    if self.op.start:
      hvfull = objects.FillDict(cluster.hvparams.get(self.op.hypervisor, {}),
                                self.op.hvparams)
      CheckNodeFreeMemory(self, self.pnode.uuid,
                          "creating instance %s" % self.op.instance_name,
                          self.be_full[constants.BE_MAXMEM],
                          self.op.hypervisor, hvfull)

    self.dry_run_result = list(node_uuids)

  def Exec(self, feedback_fn):
    """Create and add the instance to the cluster.

    """
1219
    assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
1220
                self.owned_locks(locking.LEVEL_NODE)), \
1221
      "Node locks differ from node resource locks"
1222
    assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)
1223

    
1224
    ht_kind = self.op.hypervisor
1225
    if ht_kind in constants.HTS_REQ_PORT:
1226
      network_port = self.cfg.AllocatePort()
1227
    else:
1228
      network_port = None
1229

    
1230
    instance_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
1231

    
1232
    # This is ugly but we got a chicken-egg problem here
1233
    # We can only take the group disk parameters, as the instance
1234
    # has no disks yet (we are generating them right here).
1235
    nodegroup = self.cfg.GetNodeGroup(self.pnode.group)
1236
    disks = GenerateDiskTemplate(self,
1237
                                 self.op.disk_template,
1238
                                 instance_uuid, self.pnode.uuid,
1239
                                 self.secondaries,
1240
                                 self.disks,
1241
                                 self.instance_file_storage_dir,
1242
                                 self.op.file_driver,
1243
                                 0,
1244
                                 feedback_fn,
1245
                                 self.cfg.GetGroupDiskParams(nodegroup))
1246

    
1247
    iobj = objects.Instance(name=self.op.instance_name,
1248
                            uuid=instance_uuid,
1249
                            os=self.op.os_type,
1250
                            primary_node=self.pnode.uuid,
1251
                            nics=self.nics, disks=disks,
1252
                            disk_template=self.op.disk_template,
1253
                            disks_active=False,
1254
                            admin_state=constants.ADMINST_DOWN,
1255
                            network_port=network_port,
1256
                            beparams=self.op.beparams,
1257
                            hvparams=self.op.hvparams,
1258
                            hypervisor=self.op.hypervisor,
1259
                            osparams=self.op.osparams,
1260
                            )
1261

    
1262
    if self.op.tags:
1263
      for tag in self.op.tags:
1264
        iobj.AddTag(tag)
1265

    
1266
    if self.adopt_disks:
1267
      if self.op.disk_template == constants.DT_PLAIN:
1268
        # rename LVs to the newly-generated names; we need to construct
1269
        # 'fake' LV disks with the old data, plus the new unique_id
1270
        tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
1271
        rename_to = []
1272
        for t_dsk, a_dsk in zip(tmp_disks, self.disks):
1273
          rename_to.append(t_dsk.logical_id)
1274
          t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
1275
          self.cfg.SetDiskID(t_dsk, self.pnode.uuid)
1276
        result = self.rpc.call_blockdev_rename(self.pnode.uuid,
1277
                                               zip(tmp_disks, rename_to))
1278
        result.Raise("Failed to rename adoped LVs")
1279
    else:
1280
      feedback_fn("* creating instance disks...")
1281
      try:
1282
        CreateDisks(self, iobj)
1283
      except errors.OpExecError:
1284
        self.LogWarning("Device creation failed")
1285
        self.cfg.ReleaseDRBDMinors(self.op.instance_name)
1286
        raise
1287

    
1288
    feedback_fn("adding instance %s to cluster config" % self.op.instance_name)
1289

    
1290
    self.cfg.AddInstance(iobj, self.proc.GetECId())
1291

    
1292
    # Declare that we don't want to remove the instance lock anymore, as we've
1293
    # added the instance to the config
1294
    del self.remove_locks[locking.LEVEL_INSTANCE]
1295

    
1296
    if self.op.mode == constants.INSTANCE_IMPORT:
1297
      # Release unused nodes
1298
      ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node_uuid])
1299
    else:
1300
      # Release all nodes
1301
      ReleaseLocks(self, locking.LEVEL_NODE)
1302

    
1303
    disk_abort = False
1304
    if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
1305
      feedback_fn("* wiping instance disks...")
1306
      try:
1307
        WipeDisks(self, iobj)
1308
      except errors.OpExecError, err:
1309
        logging.exception("Wiping disks failed")
1310
        self.LogWarning("Wiping instance disks failed (%s)", err)
1311
        disk_abort = True
1312

    
1313
    if disk_abort:
1314
      # Something is already wrong with the disks, don't do anything else
1315
      pass
1316
    elif self.op.wait_for_sync:
1317
      disk_abort = not WaitForSync(self, iobj)
1318
    elif iobj.disk_template in constants.DTS_INT_MIRROR:
1319
      # make sure the disks are not degraded (still sync-ing is ok)
1320
      feedback_fn("* checking mirrors status")
1321
      disk_abort = not WaitForSync(self, iobj, oneshot=True)
1322
    else:
1323
      disk_abort = False
1324

    
1325
    if disk_abort:
1326
      RemoveDisks(self, iobj)
1327
      self.cfg.RemoveInstance(iobj.uuid)
1328
      # Make sure the instance lock gets removed
1329
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
1330
      raise errors.OpExecError("There are some degraded disks for"
1331
                               " this instance")
1332

    
1333
    # instance disks are now active
1334
    iobj.disks_active = True
1335

    
1336
    # Release all node resource locks
1337
    ReleaseLocks(self, locking.LEVEL_NODE_RES)
1338

    
1339
    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
1340
      # we need to set the disks ID to the primary node, since the
1341
      # preceding code might or might not have done it, depending on
1342
      # disk template and other options
1343
      for disk in iobj.disks:
1344
        self.cfg.SetDiskID(disk, self.pnode.uuid)
1345
      if self.op.mode == constants.INSTANCE_CREATE:
1346
        if not self.op.no_install:
1347
          pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
1348
                        not self.op.wait_for_sync)
1349
          if pause_sync:
1350
            feedback_fn("* pausing disk sync to install instance OS")
1351
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
1352
                                                              (iobj.disks,
1353
                                                               iobj), True)
1354
            for idx, success in enumerate(result.payload):
1355
              if not success:
1356
                logging.warn("pause-sync of instance %s for disk %d failed",
1357
                             self.op.instance_name, idx)
1358

    
1359
          feedback_fn("* running the instance OS create scripts...")
1360
          # FIXME: pass debug option from opcode to backend
1361
          os_add_result = \
1362
            self.rpc.call_instance_os_add(self.pnode.uuid, (iobj, None), False,
1363
                                          self.op.debug_level)
1364
          if pause_sync:
1365
            feedback_fn("* resuming disk sync")
1366
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
1367
                                                              (iobj.disks,
1368
                                                               iobj), False)
1369
            for idx, success in enumerate(result.payload):
1370
              if not success:
1371
                logging.warn("resume-sync of instance %s for disk %d failed",
1372
                             self.op.instance_name, idx)
1373

    
1374
          os_add_result.Raise("Could not add os for instance %s"
1375
                              " on node %s" % (self.op.instance_name,
1376
                                               self.pnode.name))
1377

    
1378
      else:
1379
        if self.op.mode == constants.INSTANCE_IMPORT:
1380
          feedback_fn("* running the instance OS import scripts...")
1381

    
1382
          transfers = []
1383

    
1384
          for idx, image in enumerate(self.src_images):
1385
            if not image:
1386
              continue
1387

    
1388
            # FIXME: pass debug option from opcode to backend
1389
            dt = masterd.instance.DiskTransfer("disk/%s" % idx,
1390
                                               constants.IEIO_FILE, (image, ),
1391
                                               constants.IEIO_SCRIPT,
1392
                                               ((iobj.disks[idx], iobj), idx),
1393
                                               None)
1394
            transfers.append(dt)
1395

    
1396
          import_result = \
1397
            masterd.instance.TransferInstanceData(self, feedback_fn,
1398
                                                  self.op.src_node_uuid,
1399
                                                  self.pnode.uuid,
1400
                                                  self.pnode.secondary_ip,
1401
                                                  iobj, transfers)
1402
          if not compat.all(import_result):
1403
            self.LogWarning("Some disks for instance %s on node %s were not"
1404
                            " imported successfully" % (self.op.instance_name,
1405
                                                        self.pnode.name))
1406

    
1407
          rename_from = self._old_instance_name
1408

    
1409
        elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
1410
          feedback_fn("* preparing remote import...")
1411
          # The source cluster will stop the instance before attempting to make
1412
          # a connection. In some cases stopping an instance can take a long
1413
          # time, hence the shutdown timeout is added to the connection
1414
          # timeout.
1415
          connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
1416
                             self.op.source_shutdown_timeout)
1417
          timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
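          # Illustrative arithmetic, not taken from this file: if the
          # RIE_CONNECT_TIMEOUT constant were 60s and the opcode's
          # source_shutdown_timeout 120s, the importing side would wait up to
          # 180s for the source cluster to connect.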
1418

    
1419
          assert iobj.primary_node == self.pnode.uuid
1420
          disk_results = \
1421
            masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
1422
                                          self.source_x509_ca,
1423
                                          self._cds, timeouts)
1424
          if not compat.all(disk_results):
1425
            # TODO: Should the instance still be started, even if some disks
1426
            # failed to import (valid for local imports, too)?
1427
            self.LogWarning("Some disks for instance %s on node %s were not"
1428
                            " imported successfully" % (self.op.instance_name,
1429
                                                        self.pnode.name))
1430

    
1431
          rename_from = self.source_instance_name
1432

    
1433
        else:
1434
          # also checked in the prereq part
1435
          raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
1436
                                       % self.op.mode)
1437

    
1438
        # Run rename script on newly imported instance
1439
        assert iobj.name == self.op.instance_name
1440
        feedback_fn("Running rename script for %s" % self.op.instance_name)
1441
        result = self.rpc.call_instance_run_rename(self.pnode.uuid, iobj,
1442
                                                   rename_from,
1443
                                                   self.op.debug_level)
1444
        result.Warn("Failed to run rename script for %s on node %s" %
1445
                    (self.op.instance_name, self.pnode.name), self.LogWarning)
1446

    
1447
    assert not self.owned_locks(locking.LEVEL_NODE_RES)
1448

    
1449
    if self.op.start:
1450
      iobj.admin_state = constants.ADMINST_UP
1451
      self.cfg.Update(iobj, feedback_fn)
1452
      logging.info("Starting instance %s on node %s", self.op.instance_name,
1453
                   self.pnode.name)
1454
      feedback_fn("* starting instance...")
1455
      result = self.rpc.call_instance_start(self.pnode.uuid, (iobj, None, None),
1456
                                            False, self.op.reason)
1457
      result.Raise("Could not start instance")
1458

    
1459
    return list(iobj.all_nodes)
1460

    
1461

    
1462
class LUInstanceRename(LogicalUnit):
1463
  """Rename an instance.
1464

1465
  """
1466
  HPATH = "instance-rename"
1467
  HTYPE = constants.HTYPE_INSTANCE
1468

    
1469
  def CheckArguments(self):
1470
    """Check arguments.
1471

1472
    """
1473
    if self.op.ip_check and not self.op.name_check:
1474
      # TODO: make the ip check more flexible and not depend on the name check
1475
      raise errors.OpPrereqError("IP address check requires a name check",
1476
                                 errors.ECODE_INVAL)
1477

    
1478
  def BuildHooksEnv(self):
1479
    """Build hooks env.
1480

1481
    This runs on master, primary and secondary nodes of the instance.
1482

1483
    """
1484
    env = BuildInstanceHookEnvByObject(self, self.instance)
1485
    env["INSTANCE_NEW_NAME"] = self.op.new_name
1486
    return env
1487

    
1488
  def BuildHooksNodes(self):
1489
    """Build hooks nodes.
1490

1491
    """
1492
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
1493
    return (nl, nl)
1494

    
1495
  def CheckPrereq(self):
1496
    """Check prerequisites.
1497

1498
    This checks that the instance is in the cluster and is not running.
1499

1500
    """
1501
    (self.op.instance_uuid, self.op.instance_name) = \
1502
      ExpandInstanceUuidAndName(self.cfg, self.op.instance_uuid,
1503
                                self.op.instance_name)
1504
    instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1505
    assert instance is not None
1506

    
1507
    # It should actually not happen that an instance is running with a disabled
1508
    # disk template, but in case it does, the renaming of file-based instances
1509
    # will fail horribly. Thus, we test it before.
1510
    if (instance.disk_template in constants.DTS_FILEBASED and
1511
        self.op.new_name != instance.name):
1512
      CheckDiskTemplateEnabled(self.cfg.GetClusterInfo(),
1513
                               instance.disk_template)
1514

    
1515
    CheckNodeOnline(self, instance.primary_node)
1516
    CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
1517
                       msg="cannot rename")
1518
    self.instance = instance
1519

    
1520
    new_name = self.op.new_name
1521
    if self.op.name_check:
1522
      hostname = _CheckHostnameSane(self, new_name)
1523
      new_name = self.op.new_name = hostname.name
1524
      if (self.op.ip_check and
1525
          netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
1526
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
1527
                                   (hostname.ip, new_name),
1528
                                   errors.ECODE_NOTUNIQUE)
1529

    
1530
    instance_names = [inst.name for
1531
                      inst in self.cfg.GetAllInstancesInfo().values()]
1532
    if new_name in instance_names and new_name != instance.name:
1533
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
1534
                                 new_name, errors.ECODE_EXISTS)
1535

    
1536
  def Exec(self, feedback_fn):
1537
    """Rename the instance.
1538

1539
    """
1540
    old_name = self.instance.name
1541

    
1542
    rename_file_storage = False
1543
    if (self.instance.disk_template in constants.DTS_FILEBASED and
1544
        self.op.new_name != self.instance.name):
1545
      old_file_storage_dir = os.path.dirname(
1546
                               self.instance.disks[0].logical_id[1])
1547
      rename_file_storage = True
1548

    
1549
    self.cfg.RenameInstance(self.instance.uuid, self.op.new_name)
1550
    # Change the instance lock. This is definitely safe while we hold the BGL.
1551
    # Otherwise the new lock would have to be added in acquired mode.
1552
    assert self.REQ_BGL
1553
    assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER)
1554
    self.glm.remove(locking.LEVEL_INSTANCE, old_name)
1555
    self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
1556

    
1557
    # re-read the instance from the configuration after rename
1558
    renamed_inst = self.cfg.GetInstanceInfo(self.instance.uuid)
1559

    
1560
    if rename_file_storage:
1561
      new_file_storage_dir = os.path.dirname(
1562
                               renamed_inst.disks[0].logical_id[1])
1563
      result = self.rpc.call_file_storage_dir_rename(renamed_inst.primary_node,
1564
                                                     old_file_storage_dir,
1565
                                                     new_file_storage_dir)
1566
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
1567
                   " (but the instance has been renamed in Ganeti)" %
1568
                   (self.cfg.GetNodeName(renamed_inst.primary_node),
1569
                    old_file_storage_dir, new_file_storage_dir))
1570

    
1571
    StartInstanceDisks(self, renamed_inst, None)
1572
    # update info on disks
1573
    info = GetInstanceInfoText(renamed_inst)
1574
    for (idx, disk) in enumerate(renamed_inst.disks):
1575
      for node_uuid in renamed_inst.all_nodes:
1576
        self.cfg.SetDiskID(disk, node_uuid)
1577
        result = self.rpc.call_blockdev_setinfo(node_uuid,
1578
                                                (disk, renamed_inst), info)
1579
        result.Warn("Error setting info on node %s for disk %s" %
1580
                    (self.cfg.GetNodeName(node_uuid), idx), self.LogWarning)
1581
    try:
1582
      result = self.rpc.call_instance_run_rename(renamed_inst.primary_node,
1583
                                                 renamed_inst, old_name,
1584
                                                 self.op.debug_level)
1585
      result.Warn("Could not run OS rename script for instance %s on node %s"
1586
                  " (but the instance has been renamed in Ganeti)" %
1587
                  (renamed_inst.name,
1588
                   self.cfg.GetNodeName(renamed_inst.primary_node)),
1589
                  self.LogWarning)
1590
    finally:
1591
      ShutdownInstanceDisks(self, renamed_inst)
1592

    
1593
    return renamed_inst.name
1594

    
1595

    
1596
class LUInstanceRemove(LogicalUnit):
1597
  """Remove an instance.
1598

1599
  """
1600
  HPATH = "instance-remove"
1601
  HTYPE = constants.HTYPE_INSTANCE
1602
  REQ_BGL = False
1603

    
1604
  def ExpandNames(self):
1605
    self._ExpandAndLockInstance()
1606
    self.needed_locks[locking.LEVEL_NODE] = []
1607
    self.needed_locks[locking.LEVEL_NODE_RES] = []
1608
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
1609

    
1610
  def DeclareLocks(self, level):
1611
    if level == locking.LEVEL_NODE:
1612
      self._LockInstancesNodes()
1613
    elif level == locking.LEVEL_NODE_RES:
1614
      # Copy node locks
1615
      self.needed_locks[locking.LEVEL_NODE_RES] = \
1616
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1617

    
1618
  def BuildHooksEnv(self):
1619
    """Build hooks env.
1620

1621
    This runs on master, primary and secondary nodes of the instance.
1622

1623
    """
1624
    env = BuildInstanceHookEnvByObject(self, self.instance)
1625
    env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
1626
    return env
1627

    
1628
  def BuildHooksNodes(self):
1629
    """Build hooks nodes.
1630

1631
    """
1632
    nl = [self.cfg.GetMasterNode()]
1633
    nl_post = list(self.instance.all_nodes) + nl
1634
    return (nl, nl_post)
1635

    
1636
  def CheckPrereq(self):
1637
    """Check prerequisites.
1638

1639
    This checks that the instance is in the cluster.
1640

1641
    """
1642
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1643
    assert self.instance is not None, \
1644
      "Cannot retrieve locked instance %s" % self.op.instance_name
1645

    
1646
  def Exec(self, feedback_fn):
1647
    """Remove the instance.
1648

1649
    """
1650
    logging.info("Shutting down instance %s on node %s", self.instance.name,
1651
                 self.cfg.GetNodeName(self.instance.primary_node))
1652

    
1653
    result = self.rpc.call_instance_shutdown(self.instance.primary_node,
1654
                                             self.instance,
1655
                                             self.op.shutdown_timeout,
1656
                                             self.op.reason)
1657
    if self.op.ignore_failures:
1658
      result.Warn("Warning: can't shutdown instance", feedback_fn)
1659
    else:
1660
      result.Raise("Could not shutdown instance %s on node %s" %
1661
                   (self.instance.name,
1662
                    self.cfg.GetNodeName(self.instance.primary_node)))
1663

    
1664
    assert (self.owned_locks(locking.LEVEL_NODE) ==
1665
            self.owned_locks(locking.LEVEL_NODE_RES))
1666
    assert not (set(self.instance.all_nodes) -
1667
                self.owned_locks(locking.LEVEL_NODE)), \
1668
      "Not owning correct locks"
1669

    
1670
    RemoveInstance(self, feedback_fn, self.instance, self.op.ignore_failures)
1671

    
1672

    
1673
class LUInstanceMove(LogicalUnit):
1674
  """Move an instance by data-copying.
1675

1676
  """
1677
  HPATH = "instance-move"
1678
  HTYPE = constants.HTYPE_INSTANCE
1679
  REQ_BGL = False
1680

    
1681
  def ExpandNames(self):
1682
    self._ExpandAndLockInstance()
1683
    (self.op.target_node_uuid, self.op.target_node) = \
1684
      ExpandNodeUuidAndName(self.cfg, self.op.target_node_uuid,
1685
                            self.op.target_node)
1686
    self.needed_locks[locking.LEVEL_NODE] = [self.op.target_node_uuid]
1687
    self.needed_locks[locking.LEVEL_NODE_RES] = []
1688
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
1689

    
1690
  def DeclareLocks(self, level):
1691
    if level == locking.LEVEL_NODE:
1692
      self._LockInstancesNodes(primary_only=True)
1693
    elif level == locking.LEVEL_NODE_RES:
1694
      # Copy node locks
1695
      self.needed_locks[locking.LEVEL_NODE_RES] = \
1696
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1697

    
1698
  def BuildHooksEnv(self):
1699
    """Build hooks env.
1700

1701
    This runs on master, primary and secondary nodes of the instance.
1702

1703
    """
1704
    env = {
1705
      "TARGET_NODE": self.op.target_node,
1706
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
1707
      }
1708
    env.update(BuildInstanceHookEnvByObject(self, self.instance))
1709
    return env
1710

    
1711
  def BuildHooksNodes(self):
1712
    """Build hooks nodes.
1713

1714
    """
1715
    nl = [
1716
      self.cfg.GetMasterNode(),
1717
      self.instance.primary_node,
1718
      self.op.target_node_uuid,
1719
      ]
1720
    return (nl, nl)
1721

    
1722
  def CheckPrereq(self):
1723
    """Check prerequisites.
1724

1725
    This checks that the instance is in the cluster.
1726

1727
    """
1728
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1729
    assert self.instance is not None, \
1730
      "Cannot retrieve locked instance %s" % self.op.instance_name
1731

    
1732
    if self.instance.disk_template not in constants.DTS_COPYABLE:
1733
      raise errors.OpPrereqError("Disk template %s not suitable for copying" %
1734
                                 self.instance.disk_template,
1735
                                 errors.ECODE_STATE)
1736

    
1737
    target_node = self.cfg.GetNodeInfo(self.op.target_node_uuid)
1738
    assert target_node is not None, \
1739
      "Cannot retrieve locked node %s" % self.op.target_node
1740

    
1741
    self.target_node_uuid = target_node.uuid
1742
    if target_node.uuid == self.instance.primary_node:
1743
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
1744
                                 (self.instance.name, target_node.name),
1745
                                 errors.ECODE_STATE)
1746

    
1747
    bep = self.cfg.GetClusterInfo().FillBE(self.instance)
1748

    
1749
    for idx, dsk in enumerate(self.instance.disks):
1750
      if dsk.dev_type not in (constants.DT_PLAIN, constants.DT_FILE,
1751
                              constants.DT_SHARED_FILE):
1752
        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
1753
                                   " cannot copy" % idx, errors.ECODE_STATE)
1754

    
1755
    CheckNodeOnline(self, target_node.uuid)
1756
    CheckNodeNotDrained(self, target_node.uuid)
1757
    CheckNodeVmCapable(self, target_node.uuid)
1758
    cluster = self.cfg.GetClusterInfo()
1759
    group_info = self.cfg.GetNodeGroup(target_node.group)
1760
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
1761
    CheckTargetNodeIPolicy(self, ipolicy, self.instance, target_node, self.cfg,
1762
                           ignore=self.op.ignore_ipolicy)
1763

    
1764
    if self.instance.admin_state == constants.ADMINST_UP:
1765
      # check memory requirements on the secondary node
1766
      CheckNodeFreeMemory(
1767
          self, target_node.uuid, "failing over instance %s" %
1768
          self.instance.name, bep[constants.BE_MAXMEM],
1769
          self.instance.hypervisor,
1770
          self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])
1771
    else:
1772
      self.LogInfo("Not checking memory on the secondary node as"
1773
                   " instance will not be started")
1774

    
1775
    # check bridge existence
1776
    CheckInstanceBridgesExist(self, self.instance, node_uuid=target_node.uuid)
1777

    
1778
  def Exec(self, feedback_fn):
1779
    """Move an instance.
1780

1781
    The move is done by shutting it down on its present node, copying
1782
    the data over (slow) and starting it on the new node.
1783

1784
    """
1785
    source_node = self.cfg.GetNodeInfo(self.instance.primary_node)
1786
    target_node = self.cfg.GetNodeInfo(self.target_node_uuid)
1787

    
1788
    self.LogInfo("Shutting down instance %s on source node %s",
1789
                 self.instance.name, source_node.name)
1790

    
1791
    assert (self.owned_locks(locking.LEVEL_NODE) ==
1792
            self.owned_locks(locking.LEVEL_NODE_RES))
1793

    
1794
    result = self.rpc.call_instance_shutdown(source_node.uuid, self.instance,
1795
                                             self.op.shutdown_timeout,
1796
                                             self.op.reason)
1797
    if self.op.ignore_consistency:
1798
      result.Warn("Could not shutdown instance %s on node %s. Proceeding"
1799
                  " anyway. Please make sure node %s is down. Error details" %
1800
                  (self.instance.name, source_node.name, source_node.name),
1801
                  self.LogWarning)
1802
    else:
1803
      result.Raise("Could not shutdown instance %s on node %s" %
1804
                   (self.instance.name, source_node.name))
1805

    
1806
    # create the target disks
1807
    try:
1808
      CreateDisks(self, self.instance, target_node_uuid=target_node.uuid)
1809
    except errors.OpExecError:
1810
      self.LogWarning("Device creation failed")
1811
      self.cfg.ReleaseDRBDMinors(self.instance.uuid)
1812
      raise
1813

    
1814
    cluster_name = self.cfg.GetClusterInfo().cluster_name
1815

    
1816
    errs = []
1817
    # activate, get path, copy the data over
1818
    for idx, disk in enumerate(self.instance.disks):
1819
      self.LogInfo("Copying data for disk %d", idx)
1820
      result = self.rpc.call_blockdev_assemble(
1821
                 target_node.uuid, (disk, self.instance), self.instance.name,
1822
                 True, idx)
1823
      if result.fail_msg:
1824
        self.LogWarning("Can't assemble newly created disk %d: %s",
1825
                        idx, result.fail_msg)
1826
        errs.append(result.fail_msg)
1827
        break
1828
      dev_path = result.payload
1829
      result = self.rpc.call_blockdev_export(source_node.uuid, (disk,
1830
                                                                self.instance),
1831
                                             target_node.name, dev_path,
1832
                                             cluster_name)
1833
      if result.fail_msg:
1834
        self.LogWarning("Can't copy data over for disk %d: %s",
1835
                        idx, result.fail_msg)
1836
        errs.append(result.fail_msg)
1837
        break
1838

    
1839
    if errs:
1840
      self.LogWarning("Some disks failed to copy, aborting")
1841
      try:
1842
        RemoveDisks(self, self.instance, target_node_uuid=target_node.uuid)
1843
      finally:
1844
        self.cfg.ReleaseDRBDMinors(self.instance.uuid)
1845
        raise errors.OpExecError("Errors during disk copy: %s" %
1846
                                 (",".join(errs),))
1847

    
1848
    self.instance.primary_node = target_node.uuid
1849
    self.cfg.Update(self.instance, feedback_fn)
1850

    
1851
    self.LogInfo("Removing the disks on the original node")
1852
    RemoveDisks(self, self.instance, target_node_uuid=source_node.uuid)
1853

    
1854
    # Only start the instance if it's marked as up
1855
    if self.instance.admin_state == constants.ADMINST_UP:
1856
      self.LogInfo("Starting instance %s on node %s",
1857
                   self.instance.name, target_node.name)
1858

    
1859
      disks_ok, _ = AssembleInstanceDisks(self, self.instance,
1860
                                          ignore_secondaries=True)
1861
      if not disks_ok:
1862
        ShutdownInstanceDisks(self, self.instance)
1863
        raise errors.OpExecError("Can't activate the instance's disks")
1864

    
1865
      result = self.rpc.call_instance_start(target_node.uuid,
1866
                                            (self.instance, None, None), False,
1867
                                            self.op.reason)
1868
      msg = result.fail_msg
1869
      if msg:
1870
        ShutdownInstanceDisks(self, self.instance)
1871
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
1872
                                 (self.instance.name, target_node.name, msg))
1873

    
1874

    
1875
class LUInstanceMultiAlloc(NoHooksLU):
1876
  """Allocates multiple instances at the same time.
1877

1878
  """
1879
  REQ_BGL = False
1880

    
1881
  def CheckArguments(self):
1882
    """Check arguments.
1883

1884
    """
1885
    nodes = []
1886
    for inst in self.op.instances:
1887
      if inst.iallocator is not None:
1888
        raise errors.OpPrereqError("iallocator are not allowed to be set on"
1889
                                   " instance objects", errors.ECODE_INVAL)
1890
      nodes.append(bool(inst.pnode))
1891
      if inst.disk_template in constants.DTS_INT_MIRROR:
1892
        nodes.append(bool(inst.snode))
1893

    
1894
    has_nodes = compat.any(nodes)
1895
    if compat.all(nodes) ^ has_nodes:
1896
      raise errors.OpPrereqError("There are instance objects providing"
1897
                                 " pnode/snode while others do not",
1898
                                 errors.ECODE_INVAL)
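
    # The XOR above is a compact "all or none" check: e.g. nodes of
    # [True, True] or [False, False] passes, while a mixed [True, False]
    # raises, because any() is True but all() is False.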
1899

    
1900
    if not has_nodes and self.op.iallocator is None:
1901
      default_iallocator = self.cfg.GetDefaultIAllocator()
1902
      if default_iallocator:
1903
        self.op.iallocator = default_iallocator
1904
      else:
1905
        raise errors.OpPrereqError("No iallocator or nodes on the instances"
1906
                                   " given and no cluster-wide default"
1907
                                   " iallocator found; please specify either"
1908
                                   " an iallocator or nodes on the instances"
1909
                                   " or set a cluster-wide default iallocator",
1910
                                   errors.ECODE_INVAL)
1911

    
1912
    _CheckOpportunisticLocking(self.op)
1913

    
1914
    dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
1915
    if dups:
1916
      raise errors.OpPrereqError("There are duplicate instance names: %s" %
1917
                                 utils.CommaJoin(dups), errors.ECODE_INVAL)
1918

    
1919
  def ExpandNames(self):
1920
    """Calculate the locks.
1921

1922
    """
1923
    self.share_locks = ShareAll()
1924
    self.needed_locks = {
1925
      # iallocator will select nodes and even if no iallocator is used,
1926
      # collisions with LUInstanceCreate should be avoided
1927
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
1928
      }
1929

    
1930
    if self.op.iallocator:
1931
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
1932
      self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET
1933

    
1934
      if self.op.opportunistic_locking:
1935
        self.opportunistic_locks[locking.LEVEL_NODE] = True
1936
        self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
1937
    else:
1938
      nodeslist = []
1939
      for inst in self.op.instances:
1940
        (inst.pnode_uuid, inst.pnode) = \
1941
          ExpandNodeUuidAndName(self.cfg, inst.pnode_uuid, inst.pnode)
1942
        nodeslist.append(inst.pnode_uuid)
1943
        if inst.snode is not None:
1944
          (inst.snode_uuid, inst.snode) = \
1945
            ExpandNodeUuidAndName(self.cfg, inst.snode_uuid, inst.snode)
1946
          nodeslist.append(inst.snode_uuid)
1947

    
1948
      self.needed_locks[locking.LEVEL_NODE] = nodeslist
1949
      # Lock resources of instance's primary and secondary nodes (copy to
1950
      # prevent accidental modification)
1951
      self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)
1952

    
1953
  def CheckPrereq(self):
1954
    """Check prerequisite.
1955

1956
    """
1957
    if self.op.iallocator:
1958
      cluster = self.cfg.GetClusterInfo()
1959
      default_vg = self.cfg.GetVGName()
1960
      ec_id = self.proc.GetECId()
1961

    
1962
      if self.op.opportunistic_locking:
1963
        # Only consider nodes for which a lock is held
1964
        node_whitelist = self.cfg.GetNodeNames(
1965
                           list(self.owned_locks(locking.LEVEL_NODE)))
1966
      else:
1967
        node_whitelist = None
1968

    
1969
      insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
1970
                                           _ComputeNics(op, cluster, None,
1971
                                                        self.cfg, ec_id),
1972
                                           _ComputeFullBeParams(op, cluster),
1973
                                           node_whitelist)
1974
               for op in self.op.instances]
1975

    
1976
      req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
1977
      ial = iallocator.IAllocator(self.cfg, self.rpc, req)
1978

    
1979
      ial.Run(self.op.iallocator)
1980

    
1981
      if not ial.success:
1982
        raise errors.OpPrereqError("Can't compute nodes using"
1983
                                   " iallocator '%s': %s" %
1984
                                   (self.op.iallocator, ial.info),
1985
                                   errors.ECODE_NORES)
1986

    
1987
      self.ia_result = ial.result
1988

    
1989
    if self.op.dry_run:
1990
      self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
1991
        constants.JOB_IDS_KEY: [],
1992
        })
1993

    
1994
  def _ConstructPartialResult(self):
    """Constructs the partial result.

    """
    if self.op.iallocator:
      (allocatable, failed_insts) = self.ia_result
      allocatable_insts = map(compat.fst, allocatable)
    else:
      allocatable_insts = [op.instance_name for op in self.op.instances]
      failed_insts = []

    return {
      constants.ALLOCATABLE_KEY: allocatable_insts,
      constants.FAILED_KEY: failed_insts,
      }
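
  # Illustrative only (hypothetical names): with two successful allocations
  # and one failure the partial result would look like
  #   {constants.ALLOCATABLE_KEY: ["inst1.example.com", "inst2.example.com"],
  #    constants.FAILED_KEY: ["inst3.example.com"]}
  # the job IDs are attached separately by the dry-run and Exec paths.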
2009

    
2010
  def Exec(self, feedback_fn):
2011
    """Executes the opcode.
2012

2013
    """
2014
    jobs = []
2015
    if self.op.iallocator:
2016
      op2inst = dict((op.instance_name, op) for op in self.op.instances)
2017
      (allocatable, failed) = self.ia_result
2018

    
2019
      for (name, node_names) in allocatable:
2020
        op = op2inst.pop(name)
2021

    
2022
        (op.pnode_uuid, op.pnode) = \
2023
          ExpandNodeUuidAndName(self.cfg, None, node_names[0])
2024
        if len(node_names) > 1:
2025
          (op.snode_uuid, op.snode) = \
2026
            ExpandNodeUuidAndName(self.cfg, None, node_names[1])
2027

    
2028
        jobs.append([op])

      missing = set(op2inst.keys()) - set(failed)
      assert not missing, \
        "Iallocator did return incomplete result: %s" % \
        utils.CommaJoin(missing)
2034
    else:
2035
      jobs.extend([op] for op in self.op.instances)
2036

    
2037
    return ResultWithJobs(jobs, **self._ConstructPartialResult())
2038

    
2039

    
2040
class _InstNicModPrivate:
  """Data structure for network interface modifications.

  Used by L{LUInstanceSetParams}.

  """
  def __init__(self):
    self.params = None
    self.filled = None
2049

    
2050

    
2051
def _PrepareContainerMods(mods, private_fn):
  """Prepares a list of container modifications by adding a private data field.

  @type mods: list of tuples; (operation, index, parameters)
  @param mods: List of modifications
  @type private_fn: callable or None
  @param private_fn: Callable for constructing a private data field for a
    modification
  @rtype: list

  """
  if private_fn is None:
    fn = lambda: None
  else:
    fn = private_fn

  return [(op, idx, params, fn()) for (op, idx, params) in mods]
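
# Illustrative usage (not part of the original module): for NIC modifications
# LUInstanceSetParams calls something like
#   _PrepareContainerMods([(constants.DDM_ADD, -1, {"link": "br0"})],
#                         _InstNicModPrivate)
# which yields [(constants.DDM_ADD, -1, {"link": "br0"}, <private object>)],
# i.e. every 3-tuple gains a per-modification private data slot.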
2068

    
2069

    
2070
def _CheckNodesPhysicalCPUs(lu, node_uuids, requested, hypervisor_specs):
  """Checks if nodes have enough physical CPUs

  This function checks if all given nodes have the needed number of
  physical CPUs. In case any node has fewer CPUs or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type node_uuids: C{list}
  @param node_uuids: the list of node UUIDs to check
  @type requested: C{int}
  @param requested: the minimum acceptable number of physical CPUs
  @type hypervisor_specs: list of pairs (string, dict of strings)
  @param hypervisor_specs: list of hypervisor specifications in
      pairs (hypervisor_name, hvparams)
  @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
      or we cannot check the node

  """
  nodeinfo = lu.rpc.call_node_info(node_uuids, None, hypervisor_specs)
  for node_uuid in node_uuids:
    info = nodeinfo[node_uuid]
    node_name = lu.cfg.GetNodeName(node_uuid)
    info.Raise("Cannot get current information from node %s" % node_name,
               prereq=True, ecode=errors.ECODE_ENVIRON)
    (_, _, (hv_info, )) = info.payload
    num_cpus = hv_info.get("cpu_total", None)
    if not isinstance(num_cpus, int):
      raise errors.OpPrereqError("Can't compute the number of physical CPUs"
                                 " on node %s, result was '%s'" %
                                 (node_name, num_cpus), errors.ECODE_ENVIRON)
    if requested > num_cpus:
      raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
                                 "required" % (node_name, num_cpus, requested),
                                 errors.ECODE_NORES)
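
# Illustrative call (hypothetical variables, not from this file): a LU that
# needs 4 physical CPUs on its nodes could check them with
#   hv_specs = [(self.op.hypervisor, cluster.hvparams[self.op.hypervisor])]
#   _CheckNodesPhysicalCPUs(self, node_uuids, 4, hv_specs)
# which raises OpPrereqError if any node reports fewer than 4 CPUs.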
2107

    
2108

    
2109
def GetItemFromContainer(identifier, kind, container):
  """Return the item referred to by the identifier.

  @type identifier: string
  @param identifier: Item index or name or UUID
  @type kind: string
  @param kind: One-word item description
  @type container: list
  @param container: Container to get the item from

  """
  # Index
  try:
    idx = int(identifier)
    if idx == -1:
      # Append
      absidx = len(container) - 1
    elif idx < 0:
      raise IndexError("Not accepting negative indices other than -1")
    elif idx > len(container):
      raise IndexError("Got %s index %s, but there are only %s" %
                       (kind, idx, len(container)))
    else:
      absidx = idx
    return (absidx, container[idx])
  except ValueError:
    pass

  for idx, item in enumerate(container):
    if item.uuid == identifier or item.name == identifier:
      return (idx, item)

  raise errors.OpPrereqError("Cannot find %s with identifier %s" %
                             (kind, identifier), errors.ECODE_NOENT)
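
# Illustrative behaviour (not part of the original module): for a container
# of two NIC objects, GetItemFromContainer("0", "nic", nics) returns
# (0, nics[0]); GetItemFromContainer("-1", "nic", nics) returns the last item
# as (1, nics[1]); a non-numeric identifier falls through to the name/UUID
# search loop, and anything unknown raises OpPrereqError with ECODE_NOENT.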
2143

    
2144

    
2145
def _ApplyContainerMods(kind, container, chgdesc, mods,
2146
                        create_fn, modify_fn, remove_fn):
2147
  """Applies descriptions in C{mods} to C{container}.
2148

2149
  @type kind: string
2150
  @param kind: One-word item description
2151
  @type container: list
2152
  @param container: Container to modify
2153
  @type chgdesc: None or list
2154
  @param chgdesc: List of applied changes
2155
  @type mods: list
2156
  @param mods: Modifications as returned by L{_PrepareContainerMods}
2157
  @type create_fn: callable
2158
  @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
2159
    receives absolute item index, parameters and private data object as added
2160
    by L{_PrepareContainerMods}, returns tuple containing new item and changes
2161
    as list
2162
  @type modify_fn: callable
2163
  @param modify_fn: Callback for modifying an existing item
2164
    (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
2165
    and private data object as added by L{_PrepareContainerMods}, returns
2166
    changes as list
2167
  @type remove_fn: callable
2168
  @param remove_fn: Callback on removing item; receives absolute item index,
2169
    item and private data object as added by L{_PrepareContainerMods}
2170

2171
  """
2172
  for (op, identifier, params, private) in mods:
2173
    changes = None
2174

    
2175
    if op == constants.DDM_ADD:
2176
      # Calculate where item will be added
2177
      # When adding an item, identifier can only be an index
2178
      try:
2179
        idx = int(identifier)
2180
      except ValueError:
2181
        raise errors.OpPrereqError("Only possitive integer or -1 is accepted as"
2182
                                   " identifier for %s" % constants.DDM_ADD,
2183
                                   errors.ECODE_INVAL)
2184
      if idx == -1:
2185
        addidx = len(container)
2186
      else:
2187
        if idx < 0:
2188
          raise IndexError("Not accepting negative indices other than -1")
2189
        elif idx > len(container):
2190
          raise IndexError("Got %s index %s, but there are only %s" %
2191
                           (kind, idx, len(container)))
2192
        addidx = idx
2193

    
2194
      if create_fn is None:
2195
        item = params
2196
      else:
2197
        (item, changes) = create_fn(addidx, params, private)
2198

    
2199
      if idx == -1:
2200
        container.append(item)
2201
      else:
2202
        assert idx >= 0
2203
        assert idx <= len(container)
2204
        # list.insert does so before the specified index
2205
        container.insert(idx, item)
2206
    else:
2207
      # Retrieve existing item
2208
      (absidx, item) = GetItemFromContainer(identifier, kind, container)
2209

    
2210
      if op == constants.DDM_REMOVE:
2211
        assert not params
2212

    
2213
        if remove_fn is not None:
2214
          remove_fn(absidx, item, private)
2215

    
2216
        changes = [("%s/%s" % (kind, absidx), "remove")]
2217

    
2218
        assert container[absidx] == item
2219
        del container[absidx]
2220
      elif op == constants.DDM_MODIFY:
2221
        if modify_fn is not None:
2222
          changes = modify_fn(absidx, item, params, private)
2223
      else:
2224
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2225

    
2226
    assert _TApplyContModsCbChanges(changes)
2227

    
2228
    if not (chgdesc is None or changes is None):
2229
      chgdesc.extend(changes)
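
# Minimal illustration (not part of the original module): with create_fn,
# modify_fn and remove_fn all None the helper degrades to plain list surgery,
# e.g.
#   container = ["a", "b"]
#   _ApplyContainerMods("item", container, None,
#                       _PrepareContainerMods([(constants.DDM_ADD, -1, "c")],
#                                             None),
#                       None, None, None)
# leaves container == ["a", "b", "c"].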
2230

    
2231

    
2232
def _UpdateIvNames(base_index, disks):
  """Updates the C{iv_name} attribute of disks.

  @type disks: list of L{objects.Disk}

  """
  for (idx, disk) in enumerate(disks):
    disk.iv_name = "disk/%s" % (base_index + idx, )
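
# Illustrative only: after adding a disk at index 2, a caller could run
# _UpdateIvNames(2, instance.disks[2:]) to rename the remaining disks to
# "disk/2", "disk/3", ... so the iv_name sequence stays contiguous.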
2240

    
2241

    
2242
class LUInstanceSetParams(LogicalUnit):
2243
  """Modifies an instances's parameters.
2244

2245
  """
2246
  HPATH = "instance-modify"
2247
  HTYPE = constants.HTYPE_INSTANCE
2248
  REQ_BGL = False
2249

    
2250
  @staticmethod
  def _UpgradeDiskNicMods(kind, mods, verify_fn):
    assert ht.TList(mods)
    assert not mods or len(mods[0]) in (2, 3)

    if mods and len(mods[0]) == 2:
      result = []

      addremove = 0
      for op, params in mods:
        if op in (constants.DDM_ADD, constants.DDM_REMOVE):
          result.append((op, -1, params))
          addremove += 1

          if addremove > 1:
            raise errors.OpPrereqError("Only one %s add or remove operation is"
                                       " supported at a time" % kind,
                                       errors.ECODE_INVAL)
        else:
          result.append((constants.DDM_MODIFY, op, params))

      assert verify_fn(result)
    else:
      result = mods

    return result
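
  # Illustrative only: an old-style modification list such as
  #   [("add", {"size": 1024})]  is upgraded to  [(DDM_ADD, -1, {...})]
  #   [(0, {"mode": "rw"})]      is upgraded to  [(DDM_MODIFY, 0, {...})]
  # while already three-element entries are passed through unchanged.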
2276

    
2277
  @staticmethod
2278
  def _CheckMods(kind, mods, key_types, item_fn):
2279
    """Ensures requested disk/NIC modifications are valid.
2280

2281
    """
2282
    for (op, _, params) in mods:
2283
      assert ht.TDict(params)
2284

    
2285
      # If 'key_types' is an empty dict, we assume we have an
2286
      # 'ext' template and thus do not ForceDictType
2287
      if key_types:
2288
        utils.ForceDictType(params, key_types)
2289

    
2290
      if op == constants.DDM_REMOVE:
2291
        if params:
2292
          raise errors.OpPrereqError("No settings should be passed when"
2293
                                     " removing a %s" % kind,
2294
                                     errors.ECODE_INVAL)
2295
      elif op in (constants.DDM_ADD, constants.DDM_MODIFY):
2296
        item_fn(op, params)
2297
      else:
2298
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2299

    
2300
  @staticmethod
2301
  def _VerifyDiskModification(op, params, excl_stor):
2302
    """Verifies a disk modification.
2303

2304
    """
2305
    if op == constants.DDM_ADD:
2306
      mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
2307
      if mode not in constants.DISK_ACCESS_SET:
2308
        raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
2309
                                   errors.ECODE_INVAL)
2310

    
2311
      size = params.get(constants.IDISK_SIZE, None)
2312
      if size is None:
2313
        raise errors.OpPrereqError("Required disk parameter '%s' missing" %
2314
                                   constants.IDISK_SIZE, errors.ECODE_INVAL)
2315
      size = int(size)
2316

    
2317
      params[constants.IDISK_SIZE] = size
2318
      name = params.get(constants.IDISK_NAME, None)
2319
      if name is not None and name.lower() == constants.VALUE_NONE:
2320
        params[constants.IDISK_NAME] = None
2321

    
2322
      CheckSpindlesExclusiveStorage(params, excl_stor, True)
2323

    
2324
    elif op == constants.DDM_MODIFY:
2325
      if constants.IDISK_SIZE in params:
2326
        raise errors.OpPrereqError("Disk size change not possible, use"
2327
                                   " grow-disk", errors.ECODE_INVAL)
2328
      if len(params) > 2:
2329
        raise errors.OpPrereqError("Disk modification doesn't support"
2330
                                   " additional arbitrary parameters",
2331
                                   errors.ECODE_INVAL)
2332
      name = params.get(constants.IDISK_NAME, None)
2333
      if name is not None and name.lower() == constants.VALUE_NONE:
2334
        params[constants.IDISK_NAME] = None
2335

    
2336
  @staticmethod
2337
  def _VerifyNicModification(op, params):
2338
    """Verifies a network interface modification.
2339

2340
    """
2341
    if op in (constants.DDM_ADD, constants.DDM_MODIFY):
2342
      ip = params.get(constants.INIC_IP, None)
2343
      name = params.get(constants.INIC_NAME, None)
2344
      req_net = params.get(constants.INIC_NETWORK, None)
2345
      link = params.get(constants.NIC_LINK, None)
2346
      mode = params.get(constants.NIC_MODE, None)
2347
      if name is not None and name.lower() == constants.VALUE_NONE:
2348
        params[constants.INIC_NAME] = None
2349
      if req_net is not None:
2350
        if req_net.lower() == constants.VALUE_NONE:
2351
          params[constants.INIC_NETWORK] = None
2352
          req_net = None
2353
        elif link is not None or mode is not None:
2354
          raise errors.OpPrereqError("If network is given"
2355
                                     " mode or link should not",
2356
                                     errors.ECODE_INVAL)
2357

    
2358
      if op == constants.DDM_ADD:
2359
        macaddr = params.get(constants.INIC_MAC, None)
2360
        if macaddr is None:
2361
          params[constants.INIC_MAC] = constants.VALUE_AUTO
2362

    
2363
      if ip is not None:
2364
        if ip.lower() == constants.VALUE_NONE:
2365
          params[constants.INIC_IP] = None
2366
        else:
2367
          if ip.lower() == constants.NIC_IP_POOL:
2368
            if op == constants.DDM_ADD and req_net is None:
2369
              raise errors.OpPrereqError("If ip=pool, parameter network"
2370
                                         " cannot be none",
2371
                                         errors.ECODE_INVAL)
2372
          else:
2373
            if not netutils.IPAddress.IsValid(ip):
2374
              raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
2375
                                         errors.ECODE_INVAL)
2376

    
2377
      if constants.INIC_MAC in params:
2378
        macaddr = params[constants.INIC_MAC]
2379
        if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2380
          macaddr = utils.NormalizeAndValidateMac(macaddr)
2381

    
2382
        if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
2383
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
2384
                                     " modifying an existing NIC",
2385
                                     errors.ECODE_INVAL)
2386

    
2387
  def CheckArguments(self):
2388
    if not (self.op.nics or self.op.disks or self.op.disk_template or
2389
            self.op.hvparams or self.op.beparams or self.op.os_name or
2390
            self.op.osparams or self.op.offline is not None or
2391
            self.op.runtime_mem or self.op.pnode):
2392
      raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
2393

    
2394
    if self.op.hvparams:
2395
      CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
2396
                           "hypervisor", "instance", "cluster")
2397

    
2398
    self.op.disks = self._UpgradeDiskNicMods(
2399
      "disk", self.op.disks, ht.TSetParamsMods(ht.TIDiskParams))
2400
    self.op.nics = self._UpgradeDiskNicMods(
2401
      "NIC", self.op.nics, ht.TSetParamsMods(ht.TINicParams))
2402

    
2403
    if self.op.disks and self.op.disk_template is not None:
2404
      raise errors.OpPrereqError("Disk template conversion and other disk"
2405
                                 " changes not supported at the same time",
2406
                                 errors.ECODE_INVAL)
2407

    
2408
    if (self.op.disk_template and
2409
        self.op.disk_template in constants.DTS_INT_MIRROR and
2410
        self.op.remote_node is None):
2411
      raise errors.OpPrereqError("Changing the disk template to a mirrored"
2412
                                 " one requires specifying a secondary node",
2413
                                 errors.ECODE_INVAL)
2414

    
2415
    # Check NIC modifications
2416
    self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
2417
                    self._VerifyNicModification)
2418

    
2419
    if self.op.pnode:
2420
      (self.op.pnode_uuid, self.op.pnode) = \
2421
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
2422

    
2423
  def ExpandNames(self):
2424
    self._ExpandAndLockInstance()
2425
    self.needed_locks[locking.LEVEL_NODEGROUP] = []
2426
    # Can't even acquire node locks in shared mode as upcoming changes in
2427
    # Ganeti 2.6 will start to modify the node object on disk conversion
2428
    self.needed_locks[locking.LEVEL_NODE] = []
2429
    self.needed_locks[locking.LEVEL_NODE_RES] = []
2430
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2431
    # Lock the node group to look up the ipolicy
2432
    self.share_locks[locking.LEVEL_NODEGROUP] = 1
2433

    
2434
  def DeclareLocks(self, level):
2435
    if level == locking.LEVEL_NODEGROUP:
2436
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
2437
      # Acquire locks for the instance's nodegroups optimistically. Needs
2438
      # to be verified in CheckPrereq
2439
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
2440
        self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
2441
    elif level == locking.LEVEL_NODE:
2442
      self._LockInstancesNodes()
2443
      if self.op.disk_template and self.op.remote_node:
2444
        (self.op.remote_node_uuid, self.op.remote_node) = \
2445
          ExpandNodeUuidAndName(self.cfg, self.op.remote_node_uuid,
2446
                                self.op.remote_node)
2447
        self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node_uuid)
2448
    elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
2449
      # Copy node locks
2450
      self.needed_locks[locking.LEVEL_NODE_RES] = \
2451
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
2452

    
2453
  def BuildHooksEnv(self):
2454
    """Build hooks env.
2455

2456
    This runs on the master, primary and secondaries.
2457

2458
    """
2459
    args = {}
2460
    if constants.BE_MINMEM in self.be_new:
2461
      args["minmem"] = self.be_new[constants.BE_MINMEM]
2462
    if constants.BE_MAXMEM in self.be_new:
2463
      args["maxmem"] = self.be_new[constants.BE_MAXMEM]
2464
    if constants.BE_VCPUS in self.be_new:
2465
      args["vcpus"] = self.be_new[constants.BE_VCPUS]
2466
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
2467
    # information at all.
2468

    
2469
    if self._new_nics is not None:
2470
      nics = []
2471

    
2472
      for nic in self._new_nics:
2473
        n = copy.deepcopy(nic)
2474
        nicparams = self.cluster.SimpleFillNIC(n.nicparams)
2475
        n.nicparams = nicparams
2476
        nics.append(NICToTuple(self, n))
2477

    
2478
      args["nics"] = nics
2479

    
2480
    env = BuildInstanceHookEnvByObject(self, self.instance, override=args)
2481
    if self.op.disk_template:
2482
      env["NEW_DISK_TEMPLATE"] = self.op.disk_template
2483
    if self.op.runtime_mem:
2484
      env["RUNTIME_MEMORY"] = self.op.runtime_mem
2485

    
2486
    return env
2487

    
2488
  def BuildHooksNodes(self):
2489
    """Build hooks nodes.
2490

2491
    """
2492
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
2493
    return (nl, nl)
2494

    
2495
  def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
                              old_params, cluster, pnode_uuid):
    """Prepare and verify the changes of a single NIC.

    """
    update_params_dict = dict([(key, params[key])
                               for key in constants.NICS_PARAMETERS
                               if key in params])

    req_link = update_params_dict.get(constants.NIC_LINK, None)
    req_mode = update_params_dict.get(constants.NIC_MODE, None)

    new_net_uuid = None
    new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid)
    if new_net_uuid_or_name:
      new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name)
      new_net_obj = self.cfg.GetNetwork(new_net_uuid)

    if old_net_uuid:
      old_net_obj = self.cfg.GetNetwork(old_net_uuid)

    if new_net_uuid:
      netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode_uuid)
      if not netparams:
        raise errors.OpPrereqError("No netparams found for the network"
                                   " %s, probably not connected" %
                                   new_net_obj.name, errors.ECODE_INVAL)
      new_params = dict(netparams)
    else:
      new_params = GetUpdatedParams(old_params, update_params_dict)

    utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)

    new_filled_params = cluster.SimpleFillNIC(new_params)
    objects.NIC.CheckParameterSyntax(new_filled_params)

    new_mode = new_filled_params[constants.NIC_MODE]
    if new_mode == constants.NIC_MODE_BRIDGED:
      bridge = new_filled_params[constants.NIC_LINK]
      msg = self.rpc.call_bridges_exist(pnode_uuid, [bridge]).fail_msg
      if msg:
        msg = "Error checking bridges on node '%s': %s" % \
                (self.cfg.GetNodeName(pnode_uuid), msg)
        if self.op.force:
          self.warn.append(msg)
        else:
          raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)

    elif new_mode == constants.NIC_MODE_ROUTED:
      ip = params.get(constants.INIC_IP, old_ip)
      if ip is None:
        raise errors.OpPrereqError("Cannot set the NIC IP address to None"
                                   " on a routed NIC", errors.ECODE_INVAL)

    elif new_mode == constants.NIC_MODE_OVS:
      # TODO: check OVS link
      self.LogInfo("OVS links are currently not checked for correctness")

    if constants.INIC_MAC in params:
      mac = params[constants.INIC_MAC]
      if mac is None:
        raise errors.OpPrereqError("Cannot unset the NIC MAC address",
                                   errors.ECODE_INVAL)
      elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        # otherwise generate the MAC address
        params[constants.INIC_MAC] = \
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
      else:
        # or validate/reserve the current one
        try:
          self.cfg.ReserveMAC(mac, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("MAC address '%s' already in use"
                                     " in cluster" % mac,
                                     errors.ECODE_NOTUNIQUE)
    elif new_net_uuid != old_net_uuid:

      def get_net_prefix(net_uuid):
        mac_prefix = None
        if net_uuid:
          nobj = self.cfg.GetNetwork(net_uuid)
          mac_prefix = nobj.mac_prefix

        return mac_prefix

      new_prefix = get_net_prefix(new_net_uuid)
      old_prefix = get_net_prefix(old_net_uuid)
      if old_prefix != new_prefix:
        params[constants.INIC_MAC] = \
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())

    # if there is a change in (ip, network) tuple
    new_ip = params.get(constants.INIC_IP, old_ip)
    if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
      if new_ip:
        # if IP is pool then require a network and generate one IP
        if new_ip.lower() == constants.NIC_IP_POOL:
          if new_net_uuid:
            try:
              new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("Unable to get a free IP"
                                         " from the address pool",
                                         errors.ECODE_STATE)
            self.LogInfo("Chose IP %s from network %s",
                         new_ip,
                         new_net_obj.name)
            params[constants.INIC_IP] = new_ip
          else:
            raise errors.OpPrereqError("ip=pool, but no network found",
                                       errors.ECODE_INVAL)
        # Reserve the new IP in the new network, if any
        elif new_net_uuid:
          try:
            self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId())
            self.LogInfo("Reserving IP %s in network %s",
                         new_ip, new_net_obj.name)
          except errors.ReservationError:
            raise errors.OpPrereqError("IP %s not available in network %s" %
                                       (new_ip, new_net_obj.name),
                                       errors.ECODE_NOTUNIQUE)
        # The new network is None, so check whether the new IP conflicts
        # with an existing address
        elif self.op.conflicts_check:
          _CheckForConflictingIp(self, new_ip, pnode_uuid)

      # release old IP if old network is not None
      if old_ip and old_net_uuid:
        try:
          self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId())
        except errors.AddressPoolError:
          logging.warning("Release IP %s not contained in network %s",
                          old_ip, old_net_obj.name)

    # there are no changes in (ip, network) tuple and old network is not None
    elif (old_net_uuid is not None and
          (req_link is not None or req_mode is not None)):
      raise errors.OpPrereqError("Not allowed to change link or mode of"
                                 " a NIC that is connected to a network",
                                 errors.ECODE_INVAL)

    private.params = new_params
    private.filled = new_filled_params

    
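  # Disk template conversions are limited to the pairs listed in
  # _DISK_CONVERSIONS (currently plain <-> drbd) and require the instance to
  # be stopped; for mirrored targets the new secondary node is validated
  # here as well (online, not drained, enough free space, ipolicy).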
  def _PreCheckDiskTemplate(self, pnode_info):
    """CheckPrereq checks related to a new disk template."""
    # Arguments are passed to avoid configuration lookups
    pnode_uuid = self.instance.primary_node
    if self.instance.disk_template == self.op.disk_template:
      raise errors.OpPrereqError("Instance already has disk template %s" %
                                 self.instance.disk_template,
                                 errors.ECODE_INVAL)

    if not self.cluster.IsDiskTemplateEnabled(self.op.disk_template):
      raise errors.OpPrereqError("Disk template '%s' is not enabled for this"
                                 " cluster." % self.op.disk_template,
                                 errors.ECODE_STATE)

    if (self.instance.disk_template,
        self.op.disk_template) not in self._DISK_CONVERSIONS:
      raise errors.OpPrereqError("Unsupported disk template conversion from"
                                 " %s to %s" % (self.instance.disk_template,
                                                self.op.disk_template),
                                 errors.ECODE_INVAL)
    CheckInstanceState(self, self.instance, INSTANCE_DOWN,
                       msg="cannot change disk template")
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.remote_node_uuid == pnode_uuid:
        raise errors.OpPrereqError("Given new secondary node %s is the same"
                                   " as the primary node of the instance" %
                                   self.op.remote_node, errors.ECODE_STATE)
      CheckNodeOnline(self, self.op.remote_node_uuid)
      CheckNodeNotDrained(self, self.op.remote_node_uuid)
      # FIXME: here we assume that the old instance type is DT_PLAIN
      assert self.instance.disk_template == constants.DT_PLAIN
      disks = [{constants.IDISK_SIZE: d.size,
                constants.IDISK_VG: d.logical_id[0]}
               for d in self.instance.disks]
      required = ComputeDiskSizePerVG(self.op.disk_template, disks)
      CheckNodesFreeDiskPerVG(self, [self.op.remote_node_uuid], required)

      snode_info = self.cfg.GetNodeInfo(self.op.remote_node_uuid)
      snode_group = self.cfg.GetNodeGroup(snode_info.group)
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
                                                              snode_group)
      CheckTargetNodeIPolicy(self, ipolicy, self.instance, snode_info, self.cfg,
                             ignore=self.op.ignore_ipolicy)
      if pnode_info.group != snode_info.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    if self.op.disk_template not in constants.DTS_EXCL_STORAGE:
      # Make sure none of the nodes require exclusive storage
      nodes = [pnode_info]
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        assert snode_info
        nodes.append(snode_info)
      has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
      if compat.any(map(has_es, nodes)):
        errmsg = ("Cannot convert disk template from %s to %s when exclusive"
                  " storage is enabled" % (self.instance.disk_template,
                                           self.op.disk_template))
        raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)

    
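  # Besides validating the disk modifications themselves, this fills ispec
  # with the resulting disk count and sizes so that CheckPrereq can later
  # verify the modified instance against the node group's instance policy.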
  def _PreCheckDisks(self, ispec):
    """CheckPrereq checks related to disk changes.

    @type ispec: dict
    @param ispec: instance specs to be updated with the new disks

    """
    self.diskparams = self.cfg.GetInstanceDiskParams(self.instance)

    excl_stor = compat.any(
      rpc.GetExclusiveStorageForNodes(self.cfg,
                                      self.instance.all_nodes).values()
      )

    # Check disk modifications. This is done here and not in CheckArguments
    # (as with NICs), because we need to know the instance's disk template
    ver_fn = lambda op, par: self._VerifyDiskModification(op, par, excl_stor)
    if self.instance.disk_template == constants.DT_EXT:
      self._CheckMods("disk", self.op.disks, {}, ver_fn)
    else:
      self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
                      ver_fn)

    self.diskmod = _PrepareContainerMods(self.op.disks, None)

    # Check the validity of the `provider' parameter
    if self.instance.disk_template == constants.DT_EXT:
      for mod in self.diskmod:
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
        if mod[0] == constants.DDM_ADD:
          if ext_provider is None:
            raise errors.OpPrereqError("Instance template is '%s' and parameter"
                                       " '%s' missing, during disk add" %
                                       (constants.DT_EXT,
                                        constants.IDISK_PROVIDER),
                                       errors.ECODE_NOENT)
        elif mod[0] == constants.DDM_MODIFY:
          if ext_provider:
            raise errors.OpPrereqError("Parameter '%s' is invalid during disk"
                                       " modification" %
                                       constants.IDISK_PROVIDER,
                                       errors.ECODE_INVAL)
    else:
      for mod in self.diskmod:
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
        if ext_provider is not None:
          raise errors.OpPrereqError("Parameter '%s' is only valid for"
                                     " instances of type '%s'" %
                                     (constants.IDISK_PROVIDER,
                                      constants.DT_EXT),
                                     errors.ECODE_INVAL)

    if self.op.disks and self.instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Disk operations not supported for"
                                 " diskless instances", errors.ECODE_INVAL)

    def _PrepareDiskMod(_, disk, params, __):
      disk.name = params.get(constants.IDISK_NAME, None)

    # Verify disk changes (operating on a copy)
    disks = copy.deepcopy(self.instance.disks)
    _ApplyContainerMods("disk", disks, None, self.diskmod, None,
                        _PrepareDiskMod, None)
    utils.ValidateDeviceNames("disk", disks)
    if len(disks) > constants.MAX_DISKS:
      raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
                                 " more" % constants.MAX_DISKS,
                                 errors.ECODE_STATE)
    disk_sizes = [disk.size for disk in self.instance.disks]
    disk_sizes.extend(params["size"] for (op, idx, params, private) in
                      self.diskmod if op == constants.DDM_ADD)
    ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
    ispec[constants.ISPEC_DISK_SIZE] = disk_sizes

    if self.op.offline is not None and self.op.offline:
      CheckInstanceState(self, self.instance, CAN_CHANGE_INSTANCE_OFFLINE,
                         msg="can't change to offline")

    
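  # The checks below are ordered roughly as: instance/primary-node state,
  # disk template and disk/NIC modifications, hypervisor, backend and OS
  # parameters, memory availability, and finally the instance policy of the
  # owning node group.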
  def CheckPrereq(self):
    """Check prerequisites.

    This checks the requested parameter changes against the current instance
    and cluster state.

    """
    assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    self.cluster = self.cfg.GetClusterInfo()

    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    pnode_uuid = self.instance.primary_node

    self.warn = []

    if (self.op.pnode_uuid is not None and self.op.pnode_uuid != pnode_uuid and
        not self.op.force):
      # verify that the instance is not up
      instance_info = self.rpc.call_instance_info(
          pnode_uuid, self.instance.name, self.instance.hypervisor,
          self.instance.hvparams)
      if instance_info.fail_msg:
        self.warn.append("Can't get instance runtime information: %s" %
                         instance_info.fail_msg)
      elif instance_info.payload:
        raise errors.OpPrereqError("Instance is still running on %s" %
                                   self.cfg.GetNodeName(pnode_uuid),
                                   errors.ECODE_STATE)

    assert pnode_uuid in self.owned_locks(locking.LEVEL_NODE)
    node_uuids = list(self.instance.all_nodes)
    pnode_info = self.cfg.GetNodeInfo(pnode_uuid)

    #_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
    assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
    group_info = self.cfg.GetNodeGroup(pnode_info.group)

    # dictionary with instance information after the modification
    ispec = {}

    # Prepare NIC modifications
    self.nicmod = _PrepareContainerMods(self.op.nics, _InstNicModPrivate)

    # OS change
    if self.op.os_name and not self.op.force:
      CheckNodeHasOS(self, self.instance.primary_node, self.op.os_name,
                     self.op.force_variant)
      instance_os = self.op.os_name
    else:
      instance_os = self.instance.os

    assert not (self.op.disk_template and self.op.disks), \
      "Can't modify disk template and apply disk changes at the same time"

    if self.op.disk_template:
      self._PreCheckDiskTemplate(pnode_info)

    self._PreCheckDisks(ispec)

    # hvparams processing
    if self.op.hvparams:
      hv_type = self.instance.hypervisor
      i_hvdict = GetUpdatedParams(self.instance.hvparams, self.op.hvparams)
      utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
      hv_new = self.cluster.SimpleFillHV(hv_type, self.instance.os, i_hvdict)

      # local check
      hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
      CheckHVParams(self, node_uuids, self.instance.hypervisor, hv_new)
      self.hv_proposed = self.hv_new = hv_new # the new actual values
      self.hv_inst = i_hvdict # the new dict (without defaults)
    else:
      self.hv_proposed = self.cluster.SimpleFillHV(self.instance.hypervisor,
                                                   self.instance.os,
                                                   self.instance.hvparams)
      self.hv_new = self.hv_inst = {}

    # beparams processing
    if self.op.beparams:
      i_bedict = GetUpdatedParams(self.instance.beparams, self.op.beparams,
                                  use_none=True)
      objects.UpgradeBeParams(i_bedict)
      utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
      be_new = self.cluster.SimpleFillBE(i_bedict)
      self.be_proposed = self.be_new = be_new # the new actual values
      self.be_inst = i_bedict # the new dict (without defaults)
    else:
      self.be_new = self.be_inst = {}
      self.be_proposed = self.cluster.SimpleFillBE(self.instance.beparams)
    be_old = self.cluster.FillBE(self.instance)

    # CPU param validation -- checking every time a parameter is
    # changed to cover all cases where either CPU mask or vcpus have
    # changed
    if (constants.BE_VCPUS in self.be_proposed and
        constants.HV_CPU_MASK in self.hv_proposed):
      cpu_list = \
        utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
      # Verify mask is consistent with number of vCPUs. Can skip this
      # test if only 1 entry in the CPU mask, which means same mask
      # is applied to all vCPUs.
      if (len(cpu_list) > 1 and
          len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
        raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
                                   " CPU mask [%s]" %
                                   (self.be_proposed[constants.BE_VCPUS],
                                    self.hv_proposed[constants.HV_CPU_MASK]),
                                   errors.ECODE_INVAL)

      # Only perform this test if a new CPU mask is given
      if constants.HV_CPU_MASK in self.hv_new:
        # Calculate the largest CPU number requested
        max_requested_cpu = max(map(max, cpu_list))
        # Check that all of the instance's nodes have enough physical CPUs to
        # satisfy the requested CPU mask
        hvspecs = [(self.instance.hypervisor,
                    self.cfg.GetClusterInfo()
                      .hvparams[self.instance.hypervisor])]
        _CheckNodesPhysicalCPUs(self, self.instance.all_nodes,
                                max_requested_cpu + 1,
                                hvspecs)

    # osparams processing
    if self.op.osparams:
      i_osdict = GetUpdatedParams(self.instance.osparams, self.op.osparams)
      CheckOSParams(self, True, node_uuids, instance_os, i_osdict)
      self.os_inst = i_osdict # the new dict (without defaults)
    else:
      self.os_inst = {}

    
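    # When maxmem is raised, the primary node must be able to absorb the
    # difference.  With purely illustrative numbers: raising maxmem to
    # 4096 MiB while the instance currently uses 1024 MiB on a node
    # reporting 2048 MiB free gives miss_mem = 4096 - 1024 - 2048 = 1024 > 0,
    # so the request is rejected below.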
    #TODO(dynmem): do the appropriate check involving MINMEM
    if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
        be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
      mem_check_list = [pnode_uuid]
      if be_new[constants.BE_AUTO_BALANCE]:
        # either we changed auto_balance to yes or it was from before
        mem_check_list.extend(self.instance.secondary_nodes)
      instance_info = self.rpc.call_instance_info(
          pnode_uuid, self.instance.name, self.instance.hypervisor,
          self.instance.hvparams)
      hvspecs = [(self.instance.hypervisor,
                  self.cluster.hvparams[self.instance.hypervisor])]
      nodeinfo = self.rpc.call_node_info(mem_check_list, None,
                                         hvspecs)
      pninfo = nodeinfo[pnode_uuid]
      msg = pninfo.fail_msg
      if msg:
        # Assume the primary node is unreachable and go ahead
        self.warn.append("Can't get info from primary node %s: %s" %
                         (self.cfg.GetNodeName(pnode_uuid), msg))
      else:
        (_, _, (pnhvinfo, )) = pninfo.payload
        if not isinstance(pnhvinfo.get("memory_free", None), int):
          self.warn.append("Node data from primary node %s doesn't contain"
                           " free memory information" %
                           self.cfg.GetNodeName(pnode_uuid))
        elif instance_info.fail_msg:
          self.warn.append("Can't get instance runtime information: %s" %
                           instance_info.fail_msg)
        else:
          if instance_info.payload:
            current_mem = int(instance_info.payload["memory"])
          else:
            # Assume instance not running
            # (there is a slight race condition here, but it's not very
            # probable, and we have no other way to check)
            # TODO: Describe race condition
            current_mem = 0
          #TODO(dynmem): do the appropriate check involving MINMEM
          miss_mem = (be_new[constants.BE_MAXMEM] - current_mem -
                      pnhvinfo["memory_free"])
          if miss_mem > 0:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from starting, due to %d MB of memory"
                                       " missing on its primary node" %
                                       miss_mem, errors.ECODE_NORES)

      if be_new[constants.BE_AUTO_BALANCE]:
        for node_uuid, nres in nodeinfo.items():
          if node_uuid not in self.instance.secondary_nodes:
            continue
          nres.Raise("Can't get info from secondary node %s" %
                     self.cfg.GetNodeName(node_uuid), prereq=True,
                     ecode=errors.ECODE_STATE)
          (_, _, (nhvinfo, )) = nres.payload
          if not isinstance(nhvinfo.get("memory_free", None), int):
            raise errors.OpPrereqError("Secondary node %s didn't return free"
                                       " memory information" %
                                       self.cfg.GetNodeName(node_uuid),
                                       errors.ECODE_STATE)
          #TODO(dynmem): do the appropriate check involving MINMEM
          elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from failing over to its secondary"
                                       " node %s, due to not enough memory" %
                                       self.cfg.GetNodeName(node_uuid),
                                       errors.ECODE_STATE)

    
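    # Runtime memory ballooning requires a running instance and, unless
    # force is set, a target within the proposed minmem/maxmem range;
    # growing the ballooned size additionally needs enough free memory on
    # the primary node.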
    if self.op.runtime_mem:
      remote_info = self.rpc.call_instance_info(
         self.instance.primary_node, self.instance.name,
         self.instance.hypervisor,
         self.cluster.hvparams[self.instance.hypervisor])
      remote_info.Raise("Error checking node %s" %
                        self.cfg.GetNodeName(self.instance.primary_node))
      if not remote_info.payload: # not running already
        raise errors.OpPrereqError("Instance %s is not running" %
                                   self.instance.name, errors.ECODE_STATE)

      current_memory = remote_info.payload["memory"]
      if (not self.op.force and
           (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
            self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
        raise errors.OpPrereqError("Instance %s must have memory between %d"
                                   " and %d MB unless --force is given" %
                                   (self.instance.name,
                                    self.be_proposed[constants.BE_MINMEM],
                                    self.be_proposed[constants.BE_MAXMEM]),
                                   errors.ECODE_INVAL)

      delta = self.op.runtime_mem - current_memory
      if delta > 0:
        CheckNodeFreeMemory(
            self, self.instance.primary_node,
            "ballooning memory for instance %s" % self.instance.name, delta,
            self.instance.hypervisor,
            self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])

    
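    # The NIC modifications are rehearsed here on copies of the NIC
    # objects; the real containers are only touched in Exec().  The
    # resulting counts feed the instance policy check at the end of this
    # method, which is evaluated twice, once with minmem and once with
    # maxmem.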
    # make self.cluster visible in the functions below
    cluster = self.cluster

    def _PrepareNicCreate(_, params, private):
      self._PrepareNicModification(params, private, None, None,
                                   {}, cluster, pnode_uuid)
      return (None, None)

    def _PrepareNicMod(_, nic, params, private):
      self._PrepareNicModification(params, private, nic.ip, nic.network,
                                   nic.nicparams, cluster, pnode_uuid)
      return None

    def _PrepareNicRemove(_, params, __):
      ip = params.ip
      net = params.network
      if net is not None and ip is not None:
        self.cfg.ReleaseIp(net, ip, self.proc.GetECId())

    # Verify NIC changes (operating on copy)
    nics = self.instance.nics[:]
    _ApplyContainerMods("NIC", nics, None, self.nicmod,
                        _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
    if len(nics) > constants.MAX_NICS:
      raise errors.OpPrereqError("Instance has too many network interfaces"
                                 " (%d), cannot add more" % constants.MAX_NICS,
                                 errors.ECODE_STATE)

    # Pre-compute NIC changes (necessary to use result in hooks)
    self._nic_chgdesc = []
    if self.nicmod:
      # Operate on copies as this is still in prereq
      nics = [nic.Copy() for nic in self.instance.nics]
      _ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
                          self._CreateNewNic, self._ApplyNicMods, None)
      # Verify that NIC names are unique and valid
      utils.ValidateDeviceNames("NIC", nics)
      self._new_nics = nics
      ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
    else:
      self._new_nics = None
      ispec[constants.ISPEC_NIC_COUNT] = len(self.instance.nics)

    if not self.op.ignore_ipolicy:
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
                                                              group_info)

      # Fill ispec with backend parameters
      ispec[constants.ISPEC_SPINDLE_USE] = \
        self.be_new.get(constants.BE_SPINDLE_USE, None)
      ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS,
                                                         None)

      # Copy ispec to verify parameters with min/max values separately
      if self.op.disk_template:
        new_disk_template = self.op.disk_template
      else:
        new_disk_template = self.instance.disk_template
      ispec_max = ispec.copy()
      ispec_max[constants.ISPEC_MEM_SIZE] = \
        self.be_new.get(constants.BE_MAXMEM, None)
      res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,
                                                     new_disk_template)
      ispec_min = ispec.copy()
      ispec_min[constants.ISPEC_MEM_SIZE] = \
        self.be_new.get(constants.BE_MINMEM, None)
      res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,
                                                     new_disk_template)

      if (res_max or res_min):
        # FIXME: Improve error message by including information about whether
        # the upper or lower limit of the parameter fails the ipolicy.
        msg = ("Instance allocation to group %s (%s) violates policy: %s" %
               (group_info, group_info.name,
                utils.CommaJoin(set(res_max + res_min))))
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

    
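  # Conversion plain -> drbd8, roughly: create the missing data/meta LVs on
  # both nodes, rename the existing LVs so they become the DRBD data
  # children, create the DRBD devices on top and wait for the initial sync
  # unless wait_for_sync was switched off.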
  def _ConvertPlainToDrbd(self, feedback_fn):
    """Converts an instance from plain to drbd.

    """
    feedback_fn("Converting template to drbd")
    pnode_uuid = self.instance.primary_node
    snode_uuid = self.op.remote_node_uuid

    assert self.instance.disk_template == constants.DT_PLAIN

    # create a fake disk info for _GenerateDiskTemplate
    disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
                  constants.IDISK_VG: d.logical_id[0],
                  constants.IDISK_NAME: d.name}
                 for d in self.instance.disks]
    new_disks = GenerateDiskTemplate(self, self.op.disk_template,
                                     self.instance.uuid, pnode_uuid,
                                     [snode_uuid], disk_info, None, None, 0,
                                     feedback_fn, self.diskparams)
    anno_disks = rpc.AnnotateDiskParams(new_disks, self.diskparams)
    p_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, pnode_uuid)
    s_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, snode_uuid)
    info = GetInstanceInfoText(self.instance)
    feedback_fn("Creating additional volumes...")
    # first, create the missing data and meta devices
    for disk in anno_disks:
      # unfortunately this is... not too nice
      CreateSingleBlockDev(self, pnode_uuid, self.instance, disk.children[1],
                           info, True, p_excl_stor)
      for child in disk.children:
        CreateSingleBlockDev(self, snode_uuid, self.instance, child, info, True,
                             s_excl_stor)
    # at this stage, all new LVs have been created, we can rename the
    # old ones
    feedback_fn("Renaming original volumes...")
    rename_list = [(o, n.children[0].logical_id)
                   for (o, n) in zip(self.instance.disks, new_disks)]
    result = self.rpc.call_blockdev_rename(pnode_uuid, rename_list)
    result.Raise("Failed to rename original LVs")

    feedback_fn("Initializing DRBD devices...")
    # all child devices are in place, we can now create the DRBD devices
    try:
      for disk in anno_disks:
        for (node_uuid, excl_stor) in [(pnode_uuid, p_excl_stor),
                                       (snode_uuid, s_excl_stor)]:
          f_create = node_uuid == pnode_uuid
          CreateSingleBlockDev(self, node_uuid, self.instance, disk, info,
                               f_create, excl_stor)
    except errors.GenericError, e:
      feedback_fn("Initializing of DRBD devices failed;"
                  " renaming back original volumes...")
      for disk in new_disks:
        self.cfg.SetDiskID(disk, pnode_uuid)
      rename_back_list = [(n.children[0], o.logical_id)
                          for (n, o) in zip(new_disks, self.instance.disks)]
      result = self.rpc.call_blockdev_rename(pnode_uuid, rename_back_list)
      result.Raise("Failed to rename LVs back after error %s" % str(e))
      raise

    # at this point, the instance has been modified
    self.instance.disk_template = constants.DT_DRBD8
    self.instance.disks = new_disks
    self.cfg.Update(self.instance, feedback_fn)

    # Release node locks while waiting for sync
    ReleaseLocks(self, locking.LEVEL_NODE)

    # disks are created, waiting for sync
    disk_abort = not WaitForSync(self, self.instance,
                                 oneshot=not self.op.wait_for_sync)
    if disk_abort:
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance, please cleanup manually")

    # Node resource locks will be released by caller

    
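  # Conversion drbd8 -> plain, roughly: keep the data LV child of every
  # DRBD disk, give the DRBD TCP ports back to the cluster pool, then remove
  # the obsolete volumes on the secondary node and the metadata volumes on
  # the primary (failures there only produce warnings).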
  def _ConvertDrbdToPlain(self, feedback_fn):
    """Converts an instance from drbd to plain.

    """
    assert len(self.instance.secondary_nodes) == 1
    assert self.instance.disk_template == constants.DT_DRBD8

    pnode_uuid = self.instance.primary_node
    snode_uuid = self.instance.secondary_nodes[0]
    feedback_fn("Converting template to plain")

    old_disks = AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
    new_disks = [d.children[0] for d in self.instance.disks]

    # copy over size, mode and name
    for parent, child in zip(old_disks, new_disks):
      child.size = parent.size
      child.mode = parent.mode
      child.name = parent.name

    # this is a DRBD disk, return its port to the pool
    # NOTE: this must be done right before the call to cfg.Update!
    for disk in old_disks:
      tcp_port = disk.logical_id[2]
      self.cfg.AddTcpUdpPort(tcp_port)

    # update instance structure
    self.instance.disks = new_disks
    self.instance.disk_template = constants.DT_PLAIN
    _UpdateIvNames(0, self.instance.disks)
    self.cfg.Update(self.instance, feedback_fn)

    # Release locks in case removing disks takes a while
    ReleaseLocks(self, locking.LEVEL_NODE)

    feedback_fn("Removing volumes on the secondary node...")
    for disk in old_disks:
      self.cfg.SetDiskID(disk, snode_uuid)
      result = self.rpc.call_blockdev_remove(snode_uuid, (disk, self.instance))
      result.Warn("Could not remove block device %s on node %s,"
                  " continuing anyway" %
                  (disk.iv_name, self.cfg.GetNodeName(snode_uuid)),
                  self.LogWarning)

    feedback_fn("Removing unneeded volumes on the primary node...")
    for idx, disk in enumerate(old_disks):
      meta = disk.children[1]
      self.cfg.SetDiskID(meta, pnode_uuid)
      result = self.rpc.call_blockdev_remove(pnode_uuid, (meta, self.instance))
      result.Warn("Could not remove metadata for disk %d on node %s,"
                  " continuing anyway" %
                  (idx, self.cfg.GetNodeName(pnode_uuid)),
                  self.LogWarning)

    
  def _CreateNewDisk(self, idx, params, _):
    """Creates a new disk.

    """
    # add a new disk
    if self.instance.disk_template in constants.DTS_FILEBASED:
      (file_driver, file_path) = self.instance.disks[0].logical_id
      file_path = os.path.dirname(file_path)
    else:
      file_driver = file_path = None

    disk = \
      GenerateDiskTemplate(self, self.instance.disk_template,
                           self.instance.uuid, self.instance.primary_node,
                           self.instance.secondary_nodes, [params], file_path,
                           file_driver, idx, self.Log, self.diskparams)[0]

    new_disks = CreateDisks(self, self.instance, disks=[disk])

    if self.cluster.prealloc_wipe_disks:
      # Wipe new disk
      WipeOrCleanupDisks(self, self.instance,
                         disks=[(idx, disk, 0)],
                         cleanup=new_disks)

    return (disk, [
      ("disk/%d" % idx, "add:size=%s,mode=%s" % (disk.size, disk.mode)),
      ])

    
  @staticmethod
  def _ModifyDisk(idx, disk, params, _):
    """Modifies a disk.

    """
    changes = []
    mode = params.get(constants.IDISK_MODE, None)
    if mode:
      disk.mode = mode
      changes.append(("disk.mode/%d" % idx, disk.mode))

    name = params.get(constants.IDISK_NAME, None)
    disk.name = name
    changes.append(("disk.name/%d" % idx, disk.name))

    return changes

    
  def _RemoveDisk(self, idx, root, _):
    """Removes a disk.

    """
    (anno_disk,) = AnnotateDiskParams(self.instance, [root], self.cfg)
    for node_uuid, disk in anno_disk.ComputeNodeTree(
                             self.instance.primary_node):
      self.cfg.SetDiskID(disk, node_uuid)
      msg = self.rpc.call_blockdev_remove(node_uuid, (disk, self.instance)) \
              .fail_msg
      if msg:
        self.LogWarning("Could not remove disk/%d on node '%s': %s,"
                        " continuing anyway", idx,
                        self.cfg.GetNodeName(node_uuid), msg)

    # if this is a DRBD disk, return its port to the pool
    if root.dev_type in constants.LDS_DRBD:
      self.cfg.AddTcpUdpPort(root.logical_id[2])

    
  def _CreateNewNic(self, idx, params, private):
    """Creates data structure for a new network interface.

    """
    mac = params[constants.INIC_MAC]
    ip = params.get(constants.INIC_IP, None)
    net = params.get(constants.INIC_NETWORK, None)
    name = params.get(constants.INIC_NAME, None)
    net_uuid = self.cfg.LookupNetwork(net)
    #TODO: not private.filled?? can a nic have no nicparams??
    nicparams = private.filled
    nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name,
                       nicparams=nicparams)
    nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())

    return (nobj, [
      ("nic.%d" % idx,
       "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
       (mac, ip, private.filled[constants.NIC_MODE],
       private.filled[constants.NIC_LINK],
       net)),
      ])

    
  def _ApplyNicMods(self, idx, nic, params, private):
    """Modifies a network interface.

    """
    changes = []

    for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:
      if key in params:
        changes.append(("nic.%s/%d" % (key, idx), params[key]))
        setattr(nic, key, params[key])

    new_net = params.get(constants.INIC_NETWORK, nic.network)
    new_net_uuid = self.cfg.LookupNetwork(new_net)
    if new_net_uuid != nic.network:
      changes.append(("nic.network/%d" % idx, new_net))
      nic.network = new_net_uuid

    if private.filled:
      nic.nicparams = private.filled

      for (key, val) in nic.nicparams.items():
        changes.append(("nic.%s/%d" % (key, idx), val))

    return changes

    
  def Exec(self, feedback_fn):
    """Modifies an instance.

    All parameters take effect only at the next restart of the instance.

    """
    # Process here the warnings from CheckPrereq, as we don't have a
    # feedback_fn there.
    # TODO: Replace with self.LogWarning
    for warn in self.warn:
      feedback_fn("WARNING: %s" % warn)

    assert ((self.op.disk_template is None) ^
            bool(self.owned_locks(locking.LEVEL_NODE_RES))), \
      "Not owning any node resource locks"

    result = []

    # New primary node
    if self.op.pnode_uuid:
      self.instance.primary_node = self.op.pnode_uuid

    # runtime memory
    if self.op.runtime_mem:
      rpcres = self.rpc.call_instance_balloon_memory(self.instance.primary_node,
                                                     self.instance,
                                                     self.op.runtime_mem)
      rpcres.Raise("Cannot modify instance runtime memory")
      result.append(("runtime_memory", self.op.runtime_mem))

    # Apply disk changes
    _ApplyContainerMods("disk", self.instance.disks, result, self.diskmod,
                        self._CreateNewDisk, self._ModifyDisk,
                        self._RemoveDisk)
    _UpdateIvNames(0, self.instance.disks)

    
    if self.op.disk_template:
      if __debug__:
        check_nodes = set(self.instance.all_nodes)
        if self.op.remote_node_uuid:
          check_nodes.add(self.op.remote_node_uuid)
        for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
          owned = self.owned_locks(level)
          assert not (check_nodes - owned), \
            ("Not owning the correct locks, owning %r, expected at least %r" %
             (owned, check_nodes))

      r_shut = ShutdownInstanceDisks(self, self.instance)
      if not r_shut:
        raise errors.OpExecError("Cannot shutdown instance disks, unable to"
                                 " proceed with disk template conversion")
      mode = (self.instance.disk_template, self.op.disk_template)
      try:
        self._DISK_CONVERSIONS[mode](self, feedback_fn)
      except:
        self.cfg.ReleaseDRBDMinors(self.instance.uuid)
        raise
      result.append(("disk_template", self.op.disk_template))

      assert self.instance.disk_template == self.op.disk_template, \
        ("Expected disk template '%s', found '%s'" %
         (self.op.disk_template, self.instance.disk_template))

    # Release node and resource locks if there are any (they might already have
    # been released during disk conversion)
    ReleaseLocks(self, locking.LEVEL_NODE)
    ReleaseLocks(self, locking.LEVEL_NODE_RES)

    # Apply NIC changes
    if self._new_nics is not None:
      self.instance.nics = self._new_nics
      result.extend(self._nic_chgdesc)

    # hvparams changes
    if self.op.hvparams:
      self.instance.hvparams = self.hv_inst
      for key, val in self.op.hvparams.iteritems():
        result.append(("hv/%s" % key, val))

    # beparams changes
    if self.op.beparams:
      self.instance.beparams = self.be_inst
      for key, val in self.op.beparams.iteritems():
        result.append(("be/%s" % key, val))

    # OS change
    if self.op.os_name:
      self.instance.os = self.op.os_name

    # osparams changes
    if self.op.osparams:
      self.instance.osparams = self.os_inst
      for key, val in self.op.osparams.iteritems():
        result.append(("os/%s" % key, val))

    if self.op.offline is None:
      # Ignore
      pass
    elif self.op.offline:
      # Mark instance as offline
      self.cfg.MarkInstanceOffline(self.instance.uuid)
      result.append(("admin_state", constants.ADMINST_OFFLINE))
    else:
      # Mark instance as online, but stopped
      self.cfg.MarkInstanceDown(self.instance.uuid)
      result.append(("admin_state", constants.ADMINST_DOWN))

    self.cfg.Update(self.instance, feedback_fn, self.proc.GetECId())

    assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
                self.owned_locks(locking.LEVEL_NODE)), \
      "All node locks should have been released by now"

    return result

    
  _DISK_CONVERSIONS = {
    (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
    (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
    }
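  # Maps (old template, new template) to its conversion handler; only the
  # plain <-> drbd8 pair is supported.  Such a conversion is typically
  # requested with something along the lines of
  # "gnt-instance modify -t drbd -n <new-secondary> <instance>".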

    
class LUInstanceChangeGroup(LogicalUnit):
  """Change the node group of an instance.

  """
  HPATH = "instance-change-group"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self.share_locks = ShareAll()

    self.needed_locks = {
      locking.LEVEL_NODEGROUP: [],
      locking.LEVEL_NODE: [],
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
      }

    self._ExpandAndLockInstance()

    if self.op.target_groups:
      self.req_target_uuids = map(self.cfg.LookupNodeGroup,
                                  self.op.target_groups)
    else:
      self.req_target_uuids = None

    self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)

    
  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]

      if self.req_target_uuids:
        lock_groups = set(self.req_target_uuids)

        # Lock all groups used by instance optimistically; this requires going
        # via the node before it's locked, requiring verification later on
        instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
        lock_groups.update(instance_groups)
      else:
        # No target groups, need to lock all of them
        lock_groups = locking.ALL_SET

      self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups

    elif level == locking.LEVEL_NODE:
      if self.req_target_uuids:
        # Lock all nodes used by instances
        self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
        self._LockInstancesNodes()

        # Lock all nodes in all potential target groups
        lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
                       self.cfg.GetInstanceNodeGroups(self.op.instance_uuid))
        member_nodes = [node_uuid
                        for group in lock_groups
                        for node_uuid in self.cfg.GetNodeGroup(group).members]
        self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
      else:
        # Lock all nodes as all groups are potential targets
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    
3506
  def CheckPrereq(self):
3507
    owned_instance_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
3508
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
3509
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
3510

    
3511
    assert (self.req_target_uuids is None or
3512
            owned_groups.issuperset(self.req_target_uuids))
3513
    assert owned_instance_names == set([self.op.instance_name])
3514

    
3515
    # Get instance information
3516
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
3517

    
3518
    # Check if node groups for locked instance are still correct
3519
    assert owned_nodes.issuperset(self.instance.all_nodes), \
3520
      ("Instance %s's nodes changed while we kept the lock" %
3521
       self.op.instance_name)
3522

    
3523
    inst_groups = CheckInstanceNodeGroups(self.cfg, self.op.instance_uuid,
3524
                                          owned_groups)
3525

    
3526
    if self.req_target_uuids:
3527
      # User requested specific target groups
3528
      self.target_uuids = frozenset(self.req_target_uuids)
3529
    else:
3530
      # All groups except those used by the instance are potential targets
3531
      self.target_uuids = owned_groups - inst_groups
3532

    
3533
    conflicting_groups = self.target_uuids & inst_groups
3534
    if conflicting_groups:
3535
      raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
3536
                                 " used by the instance '%s'" %
3537
                                 (utils.CommaJoin(conflicting_groups),
3538
                                  self.op.instance_name),
3539
                                 errors.ECODE_INVAL)
3540

    
3541
    if not self.target_uuids:
3542
      raise errors.OpPrereqError("There are no possible target groups",
3543
                                 errors.ECODE_INVAL)
3544

    
  def BuildHooksEnv(self):
    """Build hooks env.

    """
    assert self.target_uuids

    env = {
      "TARGET_GROUPS": " ".join(self.target_uuids),
      }

    env.update(BuildInstanceHookEnvByObject(self, self.instance))

    return env

    
  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()
    return ([mn], [mn])

    
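  # The placement decision is delegated to the iallocator; this LU does not
  # move the instance itself but returns the follow-up jobs computed from
  # the evacuation-style result.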
  def Exec(self, feedback_fn):
    instances = list(self.owned_locks(locking.LEVEL_INSTANCE))

    assert instances == [self.op.instance_name], "Instance not locked"

    req = iallocator.IAReqGroupChange(instances=instances,
                                      target_groups=list(self.target_uuids))
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute solution for changing group of"
                                 " instance '%s' using iallocator '%s': %s" %
                                 (self.op.instance_name, self.op.iallocator,
                                  ial.info), errors.ECODE_NORES)

    jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False)

    self.LogInfo("Iallocator returned %s job(s) for changing group of"
                 " instance '%s'", len(jobs), self.op.instance_name)

    return ResultWithJobs(jobs)