#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Logical units dealing with instances."""

import OpenSSL
import copy
import logging
import os

from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import ht
from ganeti import hypervisor
from ganeti import locking
from ganeti.masterd import iallocator
from ganeti import masterd
from ganeti import netutils
from ganeti import objects
from ganeti import pathutils
from ganeti import rpc
from ganeti import utils

from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs

from ganeti.cmdlib.common import INSTANCE_DOWN, \
  INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, CheckNodeOnline, \
  ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
  LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
  IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
  AnnotateDiskParams, GetUpdatedParams, ExpandInstanceUuidAndName, \
  ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeUuidAndName, \
  CheckDiskTemplateEnabled, IsValidDiskAccessModeCombination
from ganeti.cmdlib.instance_storage import CreateDisks, \
  CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, WaitForSync, \
  IsExclusiveStorageEnabledNodeUuid, CreateSingleBlockDev, ComputeDisks, \
  CheckRADOSFreeSpace, ComputeDiskSizePerVG, GenerateDiskTemplate, \
  StartInstanceDisks, ShutdownInstanceDisks, AssembleInstanceDisks, \
  CheckSpindlesExclusiveStorage
from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
  GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
  NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
  ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
  GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
  CheckInstanceBridgesExist, CheckNicsBridgesExist, CheckNodeHasOS

import ganeti.masterd.instance


#: Type description for changes as returned by L{_ApplyContainerMods}'s
#: callbacks
_TApplyContModsCbChanges = \
  ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
    ht.TNonEmptyString,
    ht.TAny,
    ])))
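# Illustrative only: a value accepted by this check is either None or a list
# of two-item entries (non-empty string, arbitrary value), for example
#   [("mode", "bridged"), ("link", "br0")]
# The concrete names and values above are examples, not ones mandated by the
# type itself.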


def _CheckHostnameSane(lu, name):
  """Ensures that a given hostname resolves to a 'sane' name.

  The given name is required to be a prefix of the resolved hostname,
  to prevent accidental mismatches.

  @param lu: the logical unit on behalf of which we're checking
  @param name: the name we should resolve and check
  @return: the resolved hostname object

  """
  hostname = netutils.GetHostname(name=name)
  if hostname.name != name:
    lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name)
  if not utils.MatchNameComponent(name, [hostname.name]):
    raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
                                " same as given hostname '%s'") %
                               (hostname.name, name), errors.ECODE_INVAL)
  return hostname
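# Example (illustrative, hostnames are made up): if "web1" resolves to
# "web1.example.com" the prefix matches and the resolved object is returned;
# if it resolves to "db1.example.com" instead, an OpPrereqError is raised.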


def _CheckOpportunisticLocking(op):
  """Generate error if opportunistic locking is not possible.

  """
  if op.opportunistic_locking and not op.iallocator:
    raise errors.OpPrereqError("Opportunistic locking is only available in"
                               " combination with an instance allocator",
                               errors.ECODE_INVAL)


def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_name_whitelist):
  """Wrapper around IAReqInstanceAlloc.

  @param op: The instance opcode
  @param disks: The computed disks
  @param nics: The computed nics
  @param beparams: The fully filled beparams
  @param node_name_whitelist: List of nodes which should appear as online to the
    allocator (unless the node is already marked offline)

  @returns: A filled L{iallocator.IAReqInstanceAlloc}

  """
  spindle_use = beparams[constants.BE_SPINDLE_USE]
  return iallocator.IAReqInstanceAlloc(name=op.instance_name,
                                       disk_template=op.disk_template,
                                       tags=op.tags,
                                       os=op.os_type,
                                       vcpus=beparams[constants.BE_VCPUS],
                                       memory=beparams[constants.BE_MAXMEM],
                                       spindle_use=spindle_use,
                                       disks=disks,
                                       nics=[n.ToDict() for n in nics],
                                       hypervisor=op.hypervisor,
                                       node_whitelist=node_name_whitelist)


def _ComputeFullBeParams(op, cluster):
  """Computes the full beparams.

  @param op: The instance opcode
  @param cluster: The cluster config object

  @return: The fully filled beparams

  """
  default_beparams = cluster.beparams[constants.PP_DEFAULT]
  for param, value in op.beparams.iteritems():
    if value == constants.VALUE_AUTO:
      op.beparams[param] = default_beparams[param]
  objects.UpgradeBeParams(op.beparams)
  utils.ForceDictType(op.beparams, constants.BES_PARAMETER_TYPES)
  return cluster.SimpleFillBE(op.beparams)
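# Sketch of the flow above (all values are illustrative, not real defaults):
# with cluster defaults {"vcpus": 1, "maxmem": 512, "minmem": 512}, an opcode
# carrying {"vcpus": "auto", "maxmem": 2048} ends up, after the VALUE_AUTO
# substitution, UpgradeBeParams and SimpleFillBE, as the filled dict
# {"vcpus": 1, "maxmem": 2048, "minmem": 512} plus any remaining defaults.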


def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
  """Computes the nics.

  @param op: The instance opcode
  @param cluster: Cluster configuration object
  @param default_ip: The default ip to assign
  @param cfg: An instance of the configuration object
  @param ec_id: Execution context ID

  @returns: The built-up NICs

  """
  nics = []
  for nic in op.nics:
    nic_mode_req = nic.get(constants.INIC_MODE, None)
    nic_mode = nic_mode_req
    if nic_mode is None or nic_mode == constants.VALUE_AUTO:
      nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]

    net = nic.get(constants.INIC_NETWORK, None)
    link = nic.get(constants.NIC_LINK, None)
    ip = nic.get(constants.INIC_IP, None)
    vlan = nic.get(constants.INIC_VLAN, None)

    if net is None or net.lower() == constants.VALUE_NONE:
      net = None
    else:
      if nic_mode_req is not None or link is not None:
        raise errors.OpPrereqError("If network is given, no mode or link"
                                   " is allowed to be passed",
                                   errors.ECODE_INVAL)

    # ip validity checks
    if ip is None or ip.lower() == constants.VALUE_NONE:
      nic_ip = None
    elif ip.lower() == constants.VALUE_AUTO:
      if not op.name_check:
        raise errors.OpPrereqError("IP address set to auto but name checks"
                                   " have been skipped",
                                   errors.ECODE_INVAL)
      nic_ip = default_ip
    else:
      # We defer pool operations until later, so that the iallocator has
      # filled in the instance's node(s)
      if ip.lower() == constants.NIC_IP_POOL:
        if net is None:
          raise errors.OpPrereqError("if ip=pool, parameter network"
                                     " must be passed too",
                                     errors.ECODE_INVAL)

      elif not netutils.IPAddress.IsValid(ip):
        raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                   errors.ECODE_INVAL)

      nic_ip = ip

    # TODO: check the ip address for uniqueness
    if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
      raise errors.OpPrereqError("Routed nic mode requires an ip address",
                                 errors.ECODE_INVAL)

    # MAC address verification
    mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
    if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
      mac = utils.NormalizeAndValidateMac(mac)

      try:
        # TODO: We need to factor this out
        cfg.ReserveMAC(mac, ec_id)
      except errors.ReservationError:
        raise errors.OpPrereqError("MAC address %s already in use"
                                   " in cluster" % mac,
                                   errors.ECODE_NOTUNIQUE)

    #  Build nic parameters
    nicparams = {}
    if nic_mode_req:
      nicparams[constants.NIC_MODE] = nic_mode
    if link:
      nicparams[constants.NIC_LINK] = link
    if vlan:
      nicparams[constants.NIC_VLAN] = vlan

    check_params = cluster.SimpleFillNIC(nicparams)
    objects.NIC.CheckParameterSyntax(check_params)
    net_uuid = cfg.LookupNetwork(net)
    name = nic.get(constants.INIC_NAME, None)
    if name is not None and name.lower() == constants.VALUE_NONE:
      name = None
    nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name,
                          network=net_uuid, nicparams=nicparams)
    nic_obj.uuid = cfg.GenerateUniqueID(ec_id)
    nics.append(nic_obj)

  return nics
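# Illustrative opcode input for the helper above (names and addresses are
# made up):
#   op.nics = [{constants.INIC_IP: "pool",
#               constants.INIC_NETWORK: "net-prod",
#               constants.INIC_NAME: "eth0"},
#              {constants.INIC_MAC: "auto"}]
# The first NIC defers IP assignment to the named network's pool (filled in
# later in CheckPrereq); the second gets a generated MAC and the cluster's
# default NIC mode.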


def _CheckForConflictingIp(lu, ip, node_uuid):
  """In case of conflicting IP address raise error.

  @type ip: string
  @param ip: IP address
  @type node_uuid: string
  @param node_uuid: node UUID

  """
  (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node_uuid)
  if conf_net is not None:
    raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
                                " network %s, but the target NIC does not." %
                                (ip, conf_net)),
                               errors.ECODE_STATE)

  return (None, None)


def _ComputeIPolicyInstanceSpecViolation(
  ipolicy, instance_spec, disk_template,
  _compute_fn=ComputeIPolicySpecViolation):
  """Compute if instance specs meet the specs of ipolicy.

  @type ipolicy: dict
  @param ipolicy: The ipolicy to verify against
  @type instance_spec: dict
  @param instance_spec: The instance spec to verify
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{ComputeIPolicySpecViolation}

  """
  mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
  cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
  disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
  disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
  nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
  spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)

  return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
                     disk_sizes, spindle_use, disk_template)


def _CheckOSVariant(os_obj, name):
  """Check whether an OS name conforms to the os variants specification.

  @type os_obj: L{objects.OS}
  @param os_obj: OS object to check
  @type name: string
  @param name: OS name passed by the user, to check for validity

  """
  variant = objects.OS.GetVariant(name)
  if not os_obj.supported_variants:
    if variant:
      raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
                                 " passed)" % (os_obj.name, variant),
                                 errors.ECODE_INVAL)
    return
  if not variant:
    raise errors.OpPrereqError("OS name must include a variant",
                               errors.ECODE_INVAL)

  if variant not in os_obj.supported_variants:
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)


class LUInstanceCreate(LogicalUnit):
  """Create an instance.

  """
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def _CheckDiskTemplateValid(self):
    """Checks validity of disk template.

    """
    cluster = self.cfg.GetClusterInfo()
    if self.op.disk_template is None:
      # FIXME: It would be better to take the default disk template from the
      # ipolicy, but for the ipolicy we need the primary node, which we get from
      # the iallocator, which wants the disk template as input. To solve this
      # chicken-and-egg problem, it should be possible to specify just a node
      # group from the iallocator and take the ipolicy from that.
      self.op.disk_template = cluster.enabled_disk_templates[0]
    CheckDiskTemplateEnabled(cluster, self.op.disk_template)

  def _CheckDiskArguments(self):
    """Checks validity of disk-related arguments.

    """
    # check that disks' names are unique and valid
    utils.ValidateDeviceNames("disk", self.op.disks)

    self._CheckDiskTemplateValid()

    # check disks' parameter names and consistent adopt/no-adopt strategy
    has_adopt = has_no_adopt = False
    for disk in self.op.disks:
      if self.op.disk_template != constants.DT_EXT:
        utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
      if constants.IDISK_ADOPT in disk:
        has_adopt = True
      else:
        has_no_adopt = True
    if has_adopt and has_no_adopt:
      raise errors.OpPrereqError("Either all disks are adopted or none is",
                                 errors.ECODE_INVAL)
    if has_adopt:
      if self.op.disk_template not in constants.DTS_MAY_ADOPT:
        raise errors.OpPrereqError("Disk adoption is not supported for the"
                                   " '%s' disk template" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)
      if self.op.iallocator is not None:
        raise errors.OpPrereqError("Disk adoption not allowed with an"
                                   " iallocator script", errors.ECODE_INVAL)
      if self.op.mode == constants.INSTANCE_IMPORT:
        raise errors.OpPrereqError("Disk adoption not allowed for"
                                   " instance import", errors.ECODE_INVAL)
    else:
      if self.op.disk_template in constants.DTS_MUST_ADOPT:
        raise errors.OpPrereqError("Disk template %s requires disk adoption,"
                                   " but no 'adopt' parameter given" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)

    self.adopt_disks = has_adopt

  def _CheckVLANArguments(self):
    """Check validity of VLANs if given.

    """
    for nic in self.op.nics:
      vlan = nic.get(constants.INIC_VLAN, None)
      if vlan:
        if vlan[0] == ".":
          # vlan starting with dot means single untagged vlan,
          # might be followed by trunk (:)
          if not vlan[1:].isdigit():
            vlanlist = vlan[1:].split(':')
            for vl in vlanlist:
              if not vl.isdigit():
                raise errors.OpPrereqError("Specified VLAN parameter is "
                                           "invalid : %s" % vlan,
                                           errors.ECODE_INVAL)
        elif vlan[0] == ":":
          # Trunk - tagged only
          vlanlist = vlan[1:].split(':')
          for vl in vlanlist:
            if not vl.isdigit():
              raise errors.OpPrereqError("Specified VLAN parameter is invalid"
                                         " : %s" % vlan, errors.ECODE_INVAL)
        elif vlan.isdigit():
          # This is the simplest case. No dots, only single digit
          # -> Create untagged access port, dot needs to be added
          nic[constants.INIC_VLAN] = "." + vlan
        else:
          raise errors.OpPrereqError("Specified VLAN parameter is invalid"
                                     " : %s" % vlan, errors.ECODE_INVAL)

  def CheckArguments(self):
    """Check arguments.

    """
    # do not require name_check to ease forward/backward compatibility
    # for tools
    if self.op.no_install and self.op.start:
      self.LogInfo("No-installation mode selected, disabling startup")
      self.op.start = False
    # validate/normalize the instance name
    self.op.instance_name = \
      netutils.Hostname.GetNormalizedName(self.op.instance_name)

    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("Cannot do IP address check without a name"
                                 " check", errors.ECODE_INVAL)

    # check nics' parameter names
    for nic in self.op.nics:
      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
    # check that NICs' parameter names are unique and valid
    utils.ValidateDeviceNames("NIC", self.op.nics)

    self._CheckVLANArguments()

    self._CheckDiskArguments()
    assert self.op.disk_template is not None

    # instance name verification
    if self.op.name_check:
      self.hostname = _CheckHostnameSane(self, self.op.instance_name)
      self.op.instance_name = self.hostname.name
      # used in CheckPrereq for ip ping check
      self.check_ip = self.hostname.ip
    else:
      self.check_ip = None

    # file storage checks
    if (self.op.file_driver and
        not self.op.file_driver in constants.FILE_DRIVER):
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                 self.op.file_driver, errors.ECODE_INVAL)

    # set default file_driver if unset and required
    if (not self.op.file_driver and
        self.op.disk_template in [constants.DT_FILE,
                                  constants.DT_SHARED_FILE]):
      self.op.file_driver = constants.FD_LOOP

    ### Node/iallocator related checks
    CheckIAllocatorOrNode(self, "iallocator", "pnode")

    if self.op.pnode is not None:
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        if self.op.snode is None:
          raise errors.OpPrereqError("The networked disk templates need"
                                     " a mirror node", errors.ECODE_INVAL)
      elif self.op.snode:
        self.LogWarning("Secondary node will be ignored on non-mirrored disk"
                        " template")
        self.op.snode = None

    _CheckOpportunisticLocking(self.op)

    if self.op.mode == constants.INSTANCE_IMPORT:
      # On import force_variant must be True, because if we forced it at
      # initial install, our only chance when importing it back is that it
      # works again!
      self.op.force_variant = True

      if self.op.no_install:
        self.LogInfo("No-installation mode has no effect during import")

    elif self.op.mode == constants.INSTANCE_CREATE:
      if self.op.os_type is None:
        raise errors.OpPrereqError("No guest OS specified",
                                   errors.ECODE_INVAL)
      if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
        raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
                                   " installation" % self.op.os_type,
                                   errors.ECODE_STATE)
    elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
      self._cds = GetClusterDomainSecret()

      # Check handshake to ensure both clusters have the same domain secret
      src_handshake = self.op.source_handshake
      if not src_handshake:
        raise errors.OpPrereqError("Missing source handshake",
                                   errors.ECODE_INVAL)

      errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
                                                           src_handshake)
      if errmsg:
        raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
                                   errors.ECODE_INVAL)

      # Load and check source CA
      self.source_x509_ca_pem = self.op.source_x509_ca
      if not self.source_x509_ca_pem:
        raise errors.OpPrereqError("Missing source X509 CA",
                                   errors.ECODE_INVAL)

      try:
        (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
                                                    self._cds)
      except OpenSSL.crypto.Error, err:
        raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
                                   (err, ), errors.ECODE_INVAL)

      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
      if errcode is not None:
        raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
                                   errors.ECODE_INVAL)

      self.source_x509_ca = cert

      src_instance_name = self.op.source_instance_name
      if not src_instance_name:
        raise errors.OpPrereqError("Missing source instance name",
                                   errors.ECODE_INVAL)

      self.source_instance_name = \
        netutils.GetHostname(name=src_instance_name).name

    else:
      raise errors.OpPrereqError("Invalid instance creation mode %r" %
                                 self.op.mode, errors.ECODE_INVAL)

  def ExpandNames(self):
    """ExpandNames for CreateInstance.

    Figure out the right locks for instance creation.

    """
    self.needed_locks = {}

    # this is just a preventive check, but someone might still add this
    # instance in the meantime, and creation will fail at lock-add time
    if self.op.instance_name in \
      [inst.name for inst in self.cfg.GetAllInstancesInfo().values()]:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 self.op.instance_name, errors.ECODE_EXISTS)

    self.add_locks[locking.LEVEL_INSTANCE] = self.op.instance_name

    if self.op.iallocator:
      # TODO: Find a solution to not lock all nodes in the cluster, e.g. by
      # specifying a group on instance creation and then selecting nodes from
      # that group
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
      self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET

      if self.op.opportunistic_locking:
        self.opportunistic_locks[locking.LEVEL_NODE] = True
        self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
    else:
      (self.op.pnode_uuid, self.op.pnode) = \
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
      nodelist = [self.op.pnode_uuid]
      if self.op.snode is not None:
        (self.op.snode_uuid, self.op.snode) = \
          ExpandNodeUuidAndName(self.cfg, self.op.snode_uuid, self.op.snode)
        nodelist.append(self.op.snode_uuid)
      self.needed_locks[locking.LEVEL_NODE] = nodelist

    # in case of import lock the source node too
    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = self.op.src_node
      src_path = self.op.src_path

      if src_path is None:
        self.op.src_path = src_path = self.op.instance_name

      if src_node is None:
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
        self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
        self.op.src_node = None
        if os.path.isabs(src_path):
          raise errors.OpPrereqError("Importing an instance from a path"
                                     " requires a source node option",
                                     errors.ECODE_INVAL)
      else:
        (self.op.src_node_uuid, self.op.src_node) = (_, src_node) = \
          ExpandNodeUuidAndName(self.cfg, self.op.src_node_uuid, src_node)
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
          self.needed_locks[locking.LEVEL_NODE].append(self.op.src_node_uuid)
        if not os.path.isabs(src_path):
          self.op.src_path = \
            utils.PathJoin(pathutils.EXPORT_DIR, src_path)

    self.needed_locks[locking.LEVEL_NODE_RES] = \
      CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def _RunAllocator(self):
    """Run the allocator based on input opcode.

    """
    if self.op.opportunistic_locking:
      # Only consider nodes for which a lock is held
      node_name_whitelist = self.cfg.GetNodeNames(
        self.owned_locks(locking.LEVEL_NODE))
    else:
      node_name_whitelist = None

    req = _CreateInstanceAllocRequest(self.op, self.disks,
                                      self.nics, self.be_full,
                                      node_name_whitelist)
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      # When opportunistic locks are used only a temporary failure is generated
      if self.op.opportunistic_locking:
        ecode = errors.ECODE_TEMP_NORES
      else:
        ecode = errors.ECODE_NORES

      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" %
                                 (self.op.iallocator, ial.info),
                                 ecode)

    (self.op.pnode_uuid, self.op.pnode) = \
      ExpandNodeUuidAndName(self.cfg, None, ial.result[0])
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                 self.op.instance_name, self.op.iallocator,
                 utils.CommaJoin(ial.result))

    assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator"

    if req.RequiredNodes() == 2:
      (self.op.snode_uuid, self.op.snode) = \
        ExpandNodeUuidAndName(self.cfg, None, ial.result[1])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "ADD_MODE": self.op.mode,
      }
    if self.op.mode == constants.INSTANCE_IMPORT:
      env["SRC_NODE"] = self.op.src_node
      env["SRC_PATH"] = self.op.src_path
      env["SRC_IMAGES"] = self.src_images

    env.update(BuildInstanceHookEnv(
      name=self.op.instance_name,
      primary_node_name=self.op.pnode,
      secondary_node_names=self.cfg.GetNodeNames(self.secondaries),
      status=self.op.start,
      os_type=self.op.os_type,
      minmem=self.be_full[constants.BE_MINMEM],
      maxmem=self.be_full[constants.BE_MAXMEM],
      vcpus=self.be_full[constants.BE_VCPUS],
      nics=NICListToTuple(self, self.nics),
      disk_template=self.op.disk_template,
      disks=[(d[constants.IDISK_NAME], d.get("uuid", ""),
              d[constants.IDISK_SIZE], d[constants.IDISK_MODE])
             for d in self.disks],
      bep=self.be_full,
      hvp=self.hv_full,
      hypervisor_name=self.op.hypervisor,
      tags=self.op.tags,
      ))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode(), self.op.pnode_uuid] + self.secondaries
    return nl, nl

  def _ReadExportInfo(self):
    """Reads the export information from disk.

    It will override the opcode source node and path with the actual
    information, if these two were not specified before.

    @return: the export information

    """
    assert self.op.mode == constants.INSTANCE_IMPORT

    if self.op.src_node_uuid is None:
      locked_nodes = self.owned_locks(locking.LEVEL_NODE)
      exp_list = self.rpc.call_export_list(locked_nodes)
      found = False
      for node_uuid in exp_list:
        if exp_list[node_uuid].fail_msg:
          continue
        if self.op.src_path in exp_list[node_uuid].payload:
          found = True
          self.op.src_node = self.cfg.GetNodeInfo(node_uuid).name
          self.op.src_node_uuid = node_uuid
          self.op.src_path = utils.PathJoin(pathutils.EXPORT_DIR,
                                            self.op.src_path)
          break
      if not found:
        raise errors.OpPrereqError("No export found for relative path %s" %
                                   self.op.src_path, errors.ECODE_INVAL)

    CheckNodeOnline(self, self.op.src_node_uuid)
    result = self.rpc.call_export_info(self.op.src_node_uuid, self.op.src_path)
    result.Raise("No export or invalid export found in dir %s" %
                 self.op.src_path)

    export_info = objects.SerializableConfigParser.Loads(str(result.payload))
    if not export_info.has_section(constants.INISECT_EXP):
      raise errors.ProgrammerError("Corrupted export config",
                                   errors.ECODE_ENVIRON)

    ei_version = export_info.get(constants.INISECT_EXP, "version")
    if int(ei_version) != constants.EXPORT_VERSION:
      raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                 (ei_version, constants.EXPORT_VERSION),
                                 errors.ECODE_ENVIRON)
    return export_info

  def _ReadExportParams(self, einfo):
    """Use export parameters as defaults.

    In case the opcode doesn't specify (as in override) some instance
    parameters, then try to use them from the export information, if
    that declares them.

    """
    self.op.os_type = einfo.get(constants.INISECT_EXP, "os")

    if not self.op.disks:
      disks = []
      # TODO: import the disk iv_name too
      for idx in range(constants.MAX_DISKS):
        if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
          disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
          disks.append({constants.IDISK_SIZE: disk_sz})
      self.op.disks = disks
      if not disks and self.op.disk_template != constants.DT_DISKLESS:
        raise errors.OpPrereqError("No disk info specified and the export"
                                   " is missing the disk information",
                                   errors.ECODE_INVAL)

    if not self.op.nics:
      nics = []
      for idx in range(constants.MAX_NICS):
        if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
          ndict = {}
          for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
            nic_param_name = "nic%d_%s" % (idx, name)
            if einfo.has_option(constants.INISECT_INS, nic_param_name):
              v = einfo.get(constants.INISECT_INS, nic_param_name)
              ndict[name] = v
          nics.append(ndict)
        else:
          break
      self.op.nics = nics

    if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
      self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()

    if (self.op.hypervisor is None and
        einfo.has_option(constants.INISECT_INS, "hypervisor")):
      self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")

    if einfo.has_section(constants.INISECT_HYP):
      # use the export parameters but do not override the ones
      # specified by the user
      for name, value in einfo.items(constants.INISECT_HYP):
        if name not in self.op.hvparams:
          self.op.hvparams[name] = value

    if einfo.has_section(constants.INISECT_BEP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_BEP):
        if name not in self.op.beparams:
          self.op.beparams[name] = value
        # Compatibility for the old "memory" be param
        if name == constants.BE_MEMORY:
          if constants.BE_MAXMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MAXMEM] = value
          if constants.BE_MINMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MINMEM] = value
    else:
      # try to read the parameters old style, from the main section
      for name in constants.BES_PARAMETERS:
        if (name not in self.op.beparams and
            einfo.has_option(constants.INISECT_INS, name)):
          self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)

    if einfo.has_section(constants.INISECT_OSP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_OSP):
        if name not in self.op.osparams:
          self.op.osparams[name] = value
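  # Rough shape of the export file read above (section and option names come
  # from the constants used in this method; the layout sketch is illustrative):
  #   [INISECT_EXP]  -> "os", "version"
  #   [INISECT_INS]  -> "name", "disk0_size", "nic0_mac", "nic0_ip", "tags",
  #                     "hypervisor", ...
  #   [INISECT_HYP] / [INISECT_BEP] / [INISECT_OSP]
  #                  -> per-parameter values merged in without overriding
  #                     anything given explicitly in the opcode.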

  def _RevertToDefaults(self, cluster):
    """Revert the instance parameters to the default values.

    """
    # hvparams
    hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
    for name in self.op.hvparams.keys():
      if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
        del self.op.hvparams[name]
    # beparams
    be_defs = cluster.SimpleFillBE({})
    for name in self.op.beparams.keys():
      if name in be_defs and be_defs[name] == self.op.beparams[name]:
        del self.op.beparams[name]
    # nic params
    nic_defs = cluster.SimpleFillNIC({})
    for nic in self.op.nics:
      for name in constants.NICS_PARAMETERS:
        if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
          del nic[name]
    # osparams
    os_defs = cluster.SimpleFillOS(self.op.os_type, {})
    for name in self.op.osparams.keys():
      if name in os_defs and os_defs[name] == self.op.osparams[name]:
        del self.op.osparams[name]

  def _CalculateFileStorageDir(self):
    """Calculate final instance file storage dir.

    """
    # file storage dir calculation/check
    self.instance_file_storage_dir = None
    if self.op.disk_template in constants.DTS_FILEBASED:
      # build the full file storage dir path
      joinargs = []

      if self.op.disk_template == constants.DT_SHARED_FILE:
        get_fsd_fn = self.cfg.GetSharedFileStorageDir
      else:
        get_fsd_fn = self.cfg.GetFileStorageDir

      cfg_storagedir = get_fsd_fn()
      if not cfg_storagedir:
        raise errors.OpPrereqError("Cluster file storage dir not defined",
                                   errors.ECODE_STATE)
      joinargs.append(cfg_storagedir)

      if self.op.file_storage_dir is not None:
        joinargs.append(self.op.file_storage_dir)

      joinargs.append(self.op.instance_name)

      # pylint: disable=W0142
      self.instance_file_storage_dir = utils.PathJoin(*joinargs)
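  # Resulting layout (paths are illustrative): with a cluster file storage dir
  # of "/srv/ganeti/file-storage", an optional opcode file_storage_dir "web"
  # and instance name "web1.example.com", the final directory becomes
  # "/srv/ganeti/file-storage/web/web1.example.com".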

  def CheckPrereq(self): # pylint: disable=R0914
    """Check prerequisites.

    """
    self._CalculateFileStorageDir()

    if self.op.mode == constants.INSTANCE_IMPORT:
      export_info = self._ReadExportInfo()
      self._ReadExportParams(export_info)
      self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
    else:
      self._old_instance_name = None

    if (not self.cfg.GetVGName() and
        self.op.disk_template not in constants.DTS_NOT_LVM):
      raise errors.OpPrereqError("Cluster does not support lvm-based"
                                 " instances", errors.ECODE_STATE)

    if (self.op.hypervisor is None or
        self.op.hypervisor == constants.VALUE_AUTO):
      self.op.hypervisor = self.cfg.GetHypervisorType()

    cluster = self.cfg.GetClusterInfo()
    enabled_hvs = cluster.enabled_hypervisors
    if self.op.hypervisor not in enabled_hvs:
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
                                 " cluster (%s)" %
                                 (self.op.hypervisor, ",".join(enabled_hvs)),
                                 errors.ECODE_STATE)

    # Check tag validity
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)

    # check hypervisor parameter syntax (locally)
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
    filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
                                      self.op.hvparams)
    hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
    hv_type.CheckParameterSyntax(filled_hvp)
    self.hv_full = filled_hvp
    # check that we don't specify global parameters on an instance
    CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
                         "instance", "cluster")

    # fill and remember the beparams dict
    self.be_full = _ComputeFullBeParams(self.op, cluster)

    # build os parameters
    self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)

    # now that hvp/bep are in final format, let's reset to defaults,
    # if told to do so
    if self.op.identify_defaults:
      self._RevertToDefaults(cluster)

    # NIC buildup
    self.nics = _ComputeNics(self.op, cluster, self.check_ip, self.cfg,
                             self.proc.GetECId())

    # disk checks/pre-build
    default_vg = self.cfg.GetVGName()
    self.disks = ComputeDisks(self.op, default_vg)

    if self.op.mode == constants.INSTANCE_IMPORT:
      disk_images = []
      for idx in range(len(self.disks)):
        option = "disk%d_dump" % idx
        if export_info.has_option(constants.INISECT_INS, option):
          # FIXME: are the old os-es, disk sizes, etc. useful?
          export_name = export_info.get(constants.INISECT_INS, option)
          image = utils.PathJoin(self.op.src_path, export_name)
          disk_images.append(image)
        else:
          disk_images.append(False)

      self.src_images = disk_images

      if self.op.instance_name == self._old_instance_name:
        for idx, nic in enumerate(self.nics):
          if nic.mac == constants.VALUE_AUTO:
            nic_mac_ini = "nic%d_mac" % idx
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)

    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT

    # ip ping checks (we use the same ip that was resolved in ExpandNames)
    if self.op.ip_check:
      if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (self.check_ip, self.op.instance_name),
                                   errors.ECODE_NOTUNIQUE)

    #### mac address generation
    # By generating here the mac address both the allocator and the hooks get
    # the real final mac address rather than the 'auto' or 'generate' value.
    # There is a race condition between the generation and the instance object
    # creation, which means that we know the mac is valid now, but we're not
    # sure it will be when we actually add the instance. If things go bad
    # adding the instance will abort because of a duplicate mac, and the
    # creation job will fail.
    for nic in self.nics:
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId())

    #### allocator run

    if self.op.iallocator is not None:
      self._RunAllocator()

    # Release all unneeded node locks
    keep_locks = filter(None, [self.op.pnode_uuid, self.op.snode_uuid,
                               self.op.src_node_uuid])
    ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES)), \
      "Node locks differ from node resource locks"

    #### node related checks

    # check primary node
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode_uuid)
    assert self.pnode is not None, \
      "Cannot retrieve locked node %s" % self.op.pnode_uuid
    if pnode.offline:
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if pnode.drained:
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if not pnode.vm_capable:
      raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
                                 " '%s'" % pnode.name, errors.ECODE_STATE)

    self.secondaries = []

    # Fill in any IPs from IP pools. This must happen here, because we need to
    # know the nic's primary node, as specified by the iallocator
    for idx, nic in enumerate(self.nics):
      net_uuid = nic.network
      if net_uuid is not None:
        nobj = self.cfg.GetNetwork(net_uuid)
        netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.uuid)
        if netparams is None:
          raise errors.OpPrereqError("No netparams found for network"
                                     " %s. Probably not connected to"
                                     " node's %s nodegroup" %
                                     (nobj.name, self.pnode.name),
                                     errors.ECODE_INVAL)
        self.LogInfo("NIC/%d inherits netparams %s" %
                     (idx, netparams.values()))
        nic.nicparams = dict(netparams)
        if nic.ip is not None:
          if nic.ip.lower() == constants.NIC_IP_POOL:
            try:
              nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
                                         " from the address pool" % idx,
                                         errors.ECODE_STATE)
            self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name)
          else:
            try:
              self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("IP address %s already in use"
                                         " or does not belong to network %s" %
                                         (nic.ip, nobj.name),
                                         errors.ECODE_NOTUNIQUE)

      # net is None, ip None or given
      elif self.op.conflicts_check:
        _CheckForConflictingIp(self, nic.ip, self.pnode.uuid)

    # mirror node verification
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.snode_uuid == pnode.uuid:
        raise errors.OpPrereqError("The secondary node cannot be the"
                                   " primary node", errors.ECODE_INVAL)
      CheckNodeOnline(self, self.op.snode_uuid)
      CheckNodeNotDrained(self, self.op.snode_uuid)
      CheckNodeVmCapable(self, self.op.snode_uuid)
      self.secondaries.append(self.op.snode_uuid)

      snode = self.cfg.GetNodeInfo(self.op.snode_uuid)
      if pnode.group != snode.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    nodes = [pnode]
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      nodes.append(snode)
    has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
    excl_stor = compat.any(map(has_es, nodes))
    if excl_stor and not self.op.disk_template in constants.DTS_EXCL_STORAGE:
      raise errors.OpPrereqError("Disk template %s not supported with"
                                 " exclusive storage" % self.op.disk_template,
                                 errors.ECODE_STATE)
    for disk in self.disks:
      CheckSpindlesExclusiveStorage(disk, excl_stor, True)

    node_uuids = [pnode.uuid] + self.secondaries

    if not self.adopt_disks:
      if self.op.disk_template == constants.DT_RBD:
        # _CheckRADOSFreeSpace() is just a placeholder.
        # Any function that checks prerequisites can be placed here.
        # Check if there is enough space on the RADOS cluster.
        CheckRADOSFreeSpace()
      elif self.op.disk_template == constants.DT_EXT:
        # FIXME: Function that checks prereqs if needed
        pass
      elif self.op.disk_template in constants.DTS_LVM:
        # Check lv size requirements, if not adopting
        req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
        CheckNodesFreeDiskPerVG(self, node_uuids, req_sizes)
      else:
        # FIXME: add checks for other, non-adopting, non-lvm disk templates
        pass

    elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
      all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
                                disk[constants.IDISK_ADOPT])
                     for disk in self.disks])
      if len(all_lvs) != len(self.disks):
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
                                   errors.ECODE_INVAL)
      for lv_name in all_lvs:
        try:
          # FIXME: lv_name here is "vg/lv" need to ensure that other calls
          # to ReserveLV uses the same syntax
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("LV named %s used by another instance" %
                                     lv_name, errors.ECODE_NOTUNIQUE)

      vg_names = self.rpc.call_vg_list([pnode.uuid])[pnode.uuid]
      vg_names.Raise("Cannot get VG information from node %s" % pnode.name)

      node_lvs = self.rpc.call_lv_list([pnode.uuid],
                                       vg_names.payload.keys())[pnode.uuid]
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
      node_lvs = node_lvs.payload

      delta = all_lvs.difference(node_lvs.keys())
      if delta:
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
      if online_lvs:
        raise errors.OpPrereqError("Online logical volumes found, cannot"
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
                                   errors.ECODE_STATE)
      # update the size of disk based on what is found
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
                                        dsk[constants.IDISK_ADOPT])][0]))

    elif self.op.disk_template == constants.DT_BLOCK:
      # Normalize and de-duplicate device paths
      all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
                       for disk in self.disks])
      if len(all_disks) != len(self.disks):
        raise errors.OpPrereqError("Duplicate disk names given for adoption",
                                   errors.ECODE_INVAL)
      baddisks = [d for d in all_disks
                  if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
      if baddisks:
        raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
                                   " cannot be adopted" %
                                   (utils.CommaJoin(baddisks),
                                    constants.ADOPTABLE_BLOCKDEV_ROOT),
                                   errors.ECODE_INVAL)

      node_disks = self.rpc.call_bdev_sizes([pnode.uuid],
                                            list(all_disks))[pnode.uuid]
      node_disks.Raise("Cannot get block device information from node %s" %
                       pnode.name)
      node_disks = node_disks.payload
      delta = all_disks.difference(node_disks.keys())
      if delta:
        raise errors.OpPrereqError("Missing block device(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_disks[dsk[constants.IDISK_ADOPT]]))

    # Check disk access param to be compatible with specified hypervisor
    node_info = self.cfg.GetNodeInfo(self.op.pnode_uuid)
    node_group = self.cfg.GetNodeGroup(node_info.group)
    disk_params = self.cfg.GetGroupDiskParams(node_group)
    access_type = disk_params[self.op.disk_template].get(
      constants.RBD_ACCESS, constants.DISK_KERNELSPACE
    )

    if not IsValidDiskAccessModeCombination(self.op.hypervisor,
                                            self.op.disk_template,
                                            access_type):
      raise errors.OpPrereqError("Selected hypervisor (%s) cannot be"
                                 " used with %s disk access param" %
                                 (self.op.hypervisor, access_type),
                                 errors.ECODE_STATE)

    # Verify instance specs
    spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
    ispec = {
      constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
      constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
      constants.ISPEC_DISK_COUNT: len(self.disks),
      constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE]
                                  for disk in self.disks],
      constants.ISPEC_NIC_COUNT: len(self.nics),
      constants.ISPEC_SPINDLE_USE: spindle_use,
      }

    group_info = self.cfg.GetNodeGroup(pnode.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
    res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
                                               self.op.disk_template)
    if not self.op.ignore_ipolicy and res:
      msg = ("Instance allocation to group %s (%s) violates policy: %s" %
             (pnode.group, group_info.name, utils.CommaJoin(res)))
      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

    CheckHVParams(self, node_uuids, self.op.hypervisor, self.op.hvparams)

    CheckNodeHasOS(self, pnode.uuid, self.op.os_type, self.op.force_variant)
    # check OS parameters (remotely)
    CheckOSParams(self, True, node_uuids, self.op.os_type, self.os_full)

    CheckNicsBridgesExist(self, self.nics, self.pnode.uuid)

    #TODO: _CheckExtParams (remotely)
    # Check parameters for extstorage

    # memory check on primary node
    #TODO(dynmem): use MINMEM for checking
    if self.op.start:
      hvfull = objects.FillDict(cluster.hvparams.get(self.op.hypervisor, {}),
                                self.op.hvparams)
      CheckNodeFreeMemory(self, self.pnode.uuid,
                          "creating instance %s" % self.op.instance_name,
                          self.be_full[constants.BE_MAXMEM],
                          self.op.hypervisor, hvfull)

    self.dry_run_result = list(node_uuids)
1226

    
1227
  def Exec(self, feedback_fn):
    """Create and add the instance to the cluster.

    """
    assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
                self.owned_locks(locking.LEVEL_NODE)), \
      "Node locks differ from node resource locks"
    assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)

    ht_kind = self.op.hypervisor
    if ht_kind in constants.HTS_REQ_PORT:
      network_port = self.cfg.AllocatePort()
    else:
      network_port = None

    instance_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())

    # This is ugly but we got a chicken-egg problem here
    # We can only take the group disk parameters, as the instance
    # has no disks yet (we are generating them right here).
    nodegroup = self.cfg.GetNodeGroup(self.pnode.group)
    disks = GenerateDiskTemplate(self,
                                 self.op.disk_template,
                                 instance_uuid, self.pnode.uuid,
                                 self.secondaries,
                                 self.disks,
                                 self.instance_file_storage_dir,
                                 self.op.file_driver,
                                 0,
                                 feedback_fn,
                                 self.cfg.GetGroupDiskParams(nodegroup))

    iobj = objects.Instance(name=self.op.instance_name,
                            uuid=instance_uuid,
                            os=self.op.os_type,
                            primary_node=self.pnode.uuid,
                            nics=self.nics, disks=disks,
                            disk_template=self.op.disk_template,
                            disks_active=False,
                            admin_state=constants.ADMINST_DOWN,
                            network_port=network_port,
                            beparams=self.op.beparams,
                            hvparams=self.op.hvparams,
                            hypervisor=self.op.hypervisor,
                            osparams=self.op.osparams,
                            )

    if self.op.tags:
      for tag in self.op.tags:
        iobj.AddTag(tag)

    if self.adopt_disks:
      if self.op.disk_template == constants.DT_PLAIN:
        # rename LVs to the newly-generated names; we need to construct
        # 'fake' LV disks with the old data, plus the new unique_id
        tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
        rename_to = []
        for t_dsk, a_dsk in zip(tmp_disks, self.disks):
          rename_to.append(t_dsk.logical_id)
          t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
        result = self.rpc.call_blockdev_rename(self.pnode.uuid,
                                               zip(tmp_disks, rename_to))
        result.Raise("Failed to rename adopted LVs")
    else:
      feedback_fn("* creating instance disks...")
      try:
        CreateDisks(self, iobj)
      except errors.OpExecError:
        self.LogWarning("Device creation failed")
        self.cfg.ReleaseDRBDMinors(self.op.instance_name)
        raise

    feedback_fn("adding instance %s to cluster config" % self.op.instance_name)

    self.cfg.AddInstance(iobj, self.proc.GetECId())

    # Declare that we don't want to remove the instance lock anymore, as we've
    # added the instance to the config
    del self.remove_locks[locking.LEVEL_INSTANCE]

    if self.op.mode == constants.INSTANCE_IMPORT:
      # Release unused nodes
      ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node_uuid])
    else:
      # Release all nodes
      ReleaseLocks(self, locking.LEVEL_NODE)

    disk_abort = False
    if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
      feedback_fn("* wiping instance disks...")
      try:
        WipeDisks(self, iobj)
      except errors.OpExecError, err:
        logging.exception("Wiping disks failed")
        self.LogWarning("Wiping instance disks failed (%s)", err)
        disk_abort = True

    if disk_abort:
      # Something is already wrong with the disks, don't do anything else
      pass
    elif self.op.wait_for_sync:
      disk_abort = not WaitForSync(self, iobj)
    elif iobj.disk_template in constants.DTS_INT_MIRROR:
      # make sure the disks are not degraded (still sync-ing is ok)
      feedback_fn("* checking mirrors status")
      disk_abort = not WaitForSync(self, iobj, oneshot=True)
    else:
      disk_abort = False

    if disk_abort:
      RemoveDisks(self, iobj)
      self.cfg.RemoveInstance(iobj.uuid)
      # Make sure the instance lock gets removed
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance")

    # instance disks are now active
    iobj.disks_active = True

    # Release all node resource locks
    ReleaseLocks(self, locking.LEVEL_NODE_RES)

    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
      if self.op.mode == constants.INSTANCE_CREATE:
        if not self.op.no_install:
          pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
                        not self.op.wait_for_sync)
          if pause_sync:
            feedback_fn("* pausing disk sync to install instance OS")
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
                                                              (iobj.disks,
                                                               iobj), True)
            for idx, success in enumerate(result.payload):
              if not success:
                logging.warn("pause-sync of instance %s for disk %d failed",
                             self.op.instance_name, idx)

          feedback_fn("* running the instance OS create scripts...")
          # FIXME: pass debug option from opcode to backend
          os_add_result = \
            self.rpc.call_instance_os_add(self.pnode.uuid, (iobj, None), False,
                                          self.op.debug_level)
          if pause_sync:
            feedback_fn("* resuming disk sync")
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
                                                              (iobj.disks,
                                                               iobj), False)
            for idx, success in enumerate(result.payload):
              if not success:
                logging.warn("resume-sync of instance %s for disk %d failed",
                             self.op.instance_name, idx)

          os_add_result.Raise("Could not add os for instance %s"
                              " on node %s" % (self.op.instance_name,
                                               self.pnode.name))

      else:
        if self.op.mode == constants.INSTANCE_IMPORT:
          feedback_fn("* running the instance OS import scripts...")

          transfers = []

          for idx, image in enumerate(self.src_images):
            if not image:
              continue

            # FIXME: pass debug option from opcode to backend
            dt = masterd.instance.DiskTransfer("disk/%s" % idx,
                                               constants.IEIO_FILE, (image, ),
                                               constants.IEIO_SCRIPT,
                                               ((iobj.disks[idx], iobj), idx),
                                               None)
            transfers.append(dt)

          import_result = \
            masterd.instance.TransferInstanceData(self, feedback_fn,
                                                  self.op.src_node_uuid,
                                                  self.pnode.uuid,
                                                  self.pnode.secondary_ip,
                                                  iobj, transfers)
          if not compat.all(import_result):
            self.LogWarning("Some disks for instance %s on node %s were not"
                            " imported successfully" % (self.op.instance_name,
                                                        self.pnode.name))

          rename_from = self._old_instance_name

        elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
          feedback_fn("* preparing remote import...")
          # The source cluster will stop the instance before attempting to make
          # a connection. In some cases stopping an instance can take a long
          # time, hence the shutdown timeout is added to the connection
          # timeout.
          connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
                             self.op.source_shutdown_timeout)
          timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)

          assert iobj.primary_node == self.pnode.uuid
          disk_results = \
            masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
                                          self.source_x509_ca,
                                          self._cds, timeouts)
          if not compat.all(disk_results):
            # TODO: Should the instance still be started, even if some disks
            # failed to import (valid for local imports, too)?
            self.LogWarning("Some disks for instance %s on node %s were not"
                            " imported successfully" % (self.op.instance_name,
                                                        self.pnode.name))

          rename_from = self.source_instance_name

        else:
          # also checked in the prereq part
          raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
                                       % self.op.mode)

        # Run rename script on newly imported instance
        assert iobj.name == self.op.instance_name
        feedback_fn("Running rename script for %s" % self.op.instance_name)
        result = self.rpc.call_instance_run_rename(self.pnode.uuid, iobj,
                                                   rename_from,
                                                   self.op.debug_level)
        result.Warn("Failed to run rename script for %s on node %s" %
                    (self.op.instance_name, self.pnode.name), self.LogWarning)

    assert not self.owned_locks(locking.LEVEL_NODE_RES)

    if self.op.start:
      iobj.admin_state = constants.ADMINST_UP
      self.cfg.Update(iobj, feedback_fn)
      logging.info("Starting instance %s on node %s", self.op.instance_name,
                   self.pnode.name)
      feedback_fn("* starting instance...")
      result = self.rpc.call_instance_start(self.pnode.uuid, (iobj, None, None),
                                            False, self.op.reason)
      result.Raise("Could not start instance")

    return list(iobj.all_nodes)


class LUInstanceRename(LogicalUnit):
  """Rename an instance.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE

  def CheckArguments(self):
    """Check arguments.

    """
    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("IP address check requires a name check",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = BuildInstanceHookEnvByObject(self, self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    (self.op.instance_uuid, self.op.instance_name) = \
      ExpandInstanceUuidAndName(self.cfg, self.op.instance_uuid,
                                self.op.instance_name)
    instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    assert instance is not None

    # It should actually not happen that an instance is running with a disabled
    # disk template, but in case it does, the renaming of file-based instances
    # will fail horribly. Thus, we test it before.
    if (instance.disk_template in constants.DTS_FILEBASED and
        self.op.new_name != instance.name):
      CheckDiskTemplateEnabled(self.cfg.GetClusterInfo(),
                               instance.disk_template)

    CheckNodeOnline(self, instance.primary_node)
    CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
                       msg="cannot rename")
    self.instance = instance

    new_name = self.op.new_name
    if self.op.name_check:
      hostname = _CheckHostnameSane(self, new_name)
      new_name = self.op.new_name = hostname.name
      if (self.op.ip_check and
          netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (hostname.ip, new_name),
                                   errors.ECODE_NOTUNIQUE)

    instance_names = [inst.name for
                      inst in self.cfg.GetAllInstancesInfo().values()]
    if new_name in instance_names and new_name != instance.name:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 new_name, errors.ECODE_EXISTS)

  def Exec(self, feedback_fn):
    """Rename the instance.

    """
    old_name = self.instance.name

    rename_file_storage = False
    if (self.instance.disk_template in constants.DTS_FILEBASED and
        self.op.new_name != self.instance.name):
      old_file_storage_dir = os.path.dirname(
                               self.instance.disks[0].logical_id[1])
      rename_file_storage = True

    self.cfg.RenameInstance(self.instance.uuid, self.op.new_name)
    # Change the instance lock. This is definitely safe while we hold the BGL.
    # Otherwise the new lock would have to be added in acquired mode.
    assert self.REQ_BGL
    assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER)
    self.glm.remove(locking.LEVEL_INSTANCE, old_name)
    self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)

    # re-read the instance from the configuration after rename
    renamed_inst = self.cfg.GetInstanceInfo(self.instance.uuid)

    if rename_file_storage:
      new_file_storage_dir = os.path.dirname(
                               renamed_inst.disks[0].logical_id[1])
      result = self.rpc.call_file_storage_dir_rename(renamed_inst.primary_node,
                                                     old_file_storage_dir,
                                                     new_file_storage_dir)
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
                   " (but the instance has been renamed in Ganeti)" %
                   (self.cfg.GetNodeName(renamed_inst.primary_node),
                    old_file_storage_dir, new_file_storage_dir))

    StartInstanceDisks(self, renamed_inst, None)
    # update info on disks
    info = GetInstanceInfoText(renamed_inst)
    for (idx, disk) in enumerate(renamed_inst.disks):
      for node_uuid in renamed_inst.all_nodes:
        result = self.rpc.call_blockdev_setinfo(node_uuid,
                                                (disk, renamed_inst), info)
        result.Warn("Error setting info on node %s for disk %s" %
                    (self.cfg.GetNodeName(node_uuid), idx), self.LogWarning)
    try:
      result = self.rpc.call_instance_run_rename(renamed_inst.primary_node,
                                                 renamed_inst, old_name,
                                                 self.op.debug_level)
      result.Warn("Could not run OS rename script for instance %s on node %s"
                  " (but the instance has been renamed in Ganeti)" %
                  (renamed_inst.name,
                   self.cfg.GetNodeName(renamed_inst.primary_node)),
                  self.LogWarning)
    finally:
      ShutdownInstanceDisks(self, renamed_inst)

    return renamed_inst.name


class LUInstanceRemove(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()
    elif level == locking.LEVEL_NODE_RES:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = BuildInstanceHookEnvByObject(self, self.instance)
    env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()]
    nl_post = list(self.instance.all_nodes) + nl
    return (nl, nl_post)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Remove the instance.

    """
    logging.info("Shutting down instance %s on node %s", self.instance.name,
                 self.cfg.GetNodeName(self.instance.primary_node))

    result = self.rpc.call_instance_shutdown(self.instance.primary_node,
                                             self.instance,
                                             self.op.shutdown_timeout,
                                             self.op.reason)
    if self.op.ignore_failures:
      result.Warn("Warning: can't shutdown instance", feedback_fn)
    else:
      result.Raise("Could not shutdown instance %s on node %s" %
                   (self.instance.name,
                    self.cfg.GetNodeName(self.instance.primary_node)))

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES))
    assert not (set(self.instance.all_nodes) -
                self.owned_locks(locking.LEVEL_NODE)), \
      "Not owning correct locks"

    RemoveInstance(self, feedback_fn, self.instance, self.op.ignore_failures)


class LUInstanceMove(LogicalUnit):
  """Move an instance by data-copying.

  """
  HPATH = "instance-move"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    (self.op.target_node_uuid, self.op.target_node) = \
      ExpandNodeUuidAndName(self.cfg, self.op.target_node_uuid,
                            self.op.target_node)
    self.needed_locks[locking.LEVEL_NODE] = [self.op.target_node_uuid]
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes(primary_only=True)
    elif level == locking.LEVEL_NODE_RES:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "TARGET_NODE": self.op.target_node,
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
      }
    env.update(BuildInstanceHookEnvByObject(self, self.instance))
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [
      self.cfg.GetMasterNode(),
      self.instance.primary_node,
      self.op.target_node_uuid,
      ]
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    if self.instance.disk_template not in constants.DTS_COPYABLE:
      raise errors.OpPrereqError("Disk template %s not suitable for copying" %
                                 self.instance.disk_template,
                                 errors.ECODE_STATE)

    target_node = self.cfg.GetNodeInfo(self.op.target_node_uuid)
    assert target_node is not None, \
      "Cannot retrieve locked node %s" % self.op.target_node

    self.target_node_uuid = target_node.uuid
    if target_node.uuid == self.instance.primary_node:
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
                                 (self.instance.name, target_node.name),
                                 errors.ECODE_STATE)

    bep = self.cfg.GetClusterInfo().FillBE(self.instance)

    for idx, dsk in enumerate(self.instance.disks):
      if dsk.dev_type not in (constants.DT_PLAIN, constants.DT_FILE,
                              constants.DT_SHARED_FILE):
        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
                                   " cannot copy" % idx, errors.ECODE_STATE)

    CheckNodeOnline(self, target_node.uuid)
    CheckNodeNotDrained(self, target_node.uuid)
    CheckNodeVmCapable(self, target_node.uuid)
    cluster = self.cfg.GetClusterInfo()
    group_info = self.cfg.GetNodeGroup(target_node.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
    CheckTargetNodeIPolicy(self, ipolicy, self.instance, target_node, self.cfg,
                           ignore=self.op.ignore_ipolicy)

    if self.instance.admin_state == constants.ADMINST_UP:
      # check memory requirements on the secondary node
      CheckNodeFreeMemory(
          self, target_node.uuid, "failing over instance %s" %
          self.instance.name, bep[constants.BE_MAXMEM],
          self.instance.hypervisor,
          self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])
    else:
      self.LogInfo("Not checking memory on the secondary node as"
                   " instance will not be started")

1781
    CheckInstanceBridgesExist(self, self.instance, node_uuid=target_node.uuid)
1782

    
1783
  def Exec(self, feedback_fn):
1784
    """Move an instance.
1785

1786
    The move is done by shutting it down on its present node, copying
1787
    the data over (slow) and starting it on the new node.
1788

1789
    """
1790
    source_node = self.cfg.GetNodeInfo(self.instance.primary_node)
1791
    target_node = self.cfg.GetNodeInfo(self.target_node_uuid)
1792

    
1793
    self.LogInfo("Shutting down instance %s on source node %s",
1794
                 self.instance.name, source_node.name)
1795

    
1796
    assert (self.owned_locks(locking.LEVEL_NODE) ==
1797
            self.owned_locks(locking.LEVEL_NODE_RES))
1798

    
1799
    result = self.rpc.call_instance_shutdown(source_node.uuid, self.instance,
1800
                                             self.op.shutdown_timeout,
1801
                                             self.op.reason)
1802
    if self.op.ignore_consistency:
1803
      result.Warn("Could not shutdown instance %s on node %s. Proceeding"
1804
                  " anyway. Please make sure node %s is down. Error details" %
1805
                  (self.instance.name, source_node.name, source_node.name),
1806
                  self.LogWarning)
1807
    else:
1808
      result.Raise("Could not shutdown instance %s on node %s" %
1809
                   (self.instance.name, source_node.name))
1810

    
1811
    # create the target disks
1812
    try:
1813
      CreateDisks(self, self.instance, target_node_uuid=target_node.uuid)
1814
    except errors.OpExecError:
1815
      self.LogWarning("Device creation failed")
1816
      self.cfg.ReleaseDRBDMinors(self.instance.uuid)
1817
      raise
1818

    
1819
    cluster_name = self.cfg.GetClusterInfo().cluster_name
1820

    
1821
    errs = []
1822
    # activate, get path, copy the data over
1823
    for idx, disk in enumerate(self.instance.disks):
1824
      self.LogInfo("Copying data for disk %d", idx)
1825
      result = self.rpc.call_blockdev_assemble(
1826
                 target_node.uuid, (disk, self.instance), self.instance.name,
1827
                 True, idx)
1828
      if result.fail_msg:
1829
        self.LogWarning("Can't assemble newly created disk %d: %s",
1830
                        idx, result.fail_msg)
1831
        errs.append(result.fail_msg)
1832
        break
1833
      dev_path, _ = result.payload
1834
      result = self.rpc.call_blockdev_export(source_node.uuid, (disk,
1835
                                                                self.instance),
1836
                                             target_node.secondary_ip,
1837
                                             dev_path, cluster_name)
1838
      if result.fail_msg:
1839
        self.LogWarning("Can't copy data over for disk %d: %s",
1840
                        idx, result.fail_msg)
1841
        errs.append(result.fail_msg)
1842
        break
1843

    
1844
    if errs:
1845
      self.LogWarning("Some disks failed to copy, aborting")
1846
      try:
1847
        RemoveDisks(self, self.instance, target_node_uuid=target_node.uuid)
1848
      finally:
1849
        self.cfg.ReleaseDRBDMinors(self.instance.uuid)
1850
        raise errors.OpExecError("Errors during disk copy: %s" %
1851
                                 (",".join(errs),))
1852

    
1853
    self.instance.primary_node = target_node.uuid
1854
    self.cfg.Update(self.instance, feedback_fn)
1855

    
1856
    self.LogInfo("Removing the disks on the original node")
1857
    RemoveDisks(self, self.instance, target_node_uuid=source_node.uuid)
1858

    
1859
    # Only start the instance if it's marked as up
1860
    if self.instance.admin_state == constants.ADMINST_UP:
1861
      self.LogInfo("Starting instance %s on node %s",
1862
                   self.instance.name, target_node.name)
1863

    
1864
      disks_ok, _ = AssembleInstanceDisks(self, self.instance,
1865
                                          ignore_secondaries=True)
1866
      if not disks_ok:
1867
        ShutdownInstanceDisks(self, self.instance)
1868
        raise errors.OpExecError("Can't activate the instance's disks")
1869

    
1870
      result = self.rpc.call_instance_start(target_node.uuid,
1871
                                            (self.instance, None, None), False,
1872
                                            self.op.reason)
1873
      msg = result.fail_msg
1874
      if msg:
1875
        ShutdownInstanceDisks(self, self.instance)
1876
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
1877
                                 (self.instance.name, target_node.name, msg))
1878

    
1879

    
1880
class LUInstanceMultiAlloc(NoHooksLU):
1881
  """Allocates multiple instances at the same time.
1882

1883
  """
1884
  REQ_BGL = False
1885

    
1886
  def CheckArguments(self):
1887
    """Check arguments.
1888

1889
    """
1890
    nodes = []
1891
    for inst in self.op.instances:
1892
      if inst.iallocator is not None:
1893
        raise errors.OpPrereqError("iallocator are not allowed to be set on"
                                   " instance objects", errors.ECODE_INVAL)
      nodes.append(bool(inst.pnode))
      if inst.disk_template in constants.DTS_INT_MIRROR:
        nodes.append(bool(inst.snode))

    has_nodes = compat.any(nodes)
    if compat.all(nodes) ^ has_nodes:
      raise errors.OpPrereqError("There are instance objects providing"
                                 " pnode/snode while others do not",
                                 errors.ECODE_INVAL)

    if not has_nodes and self.op.iallocator is None:
      default_iallocator = self.cfg.GetDefaultIAllocator()
      if default_iallocator:
        self.op.iallocator = default_iallocator
      else:
        raise errors.OpPrereqError("No iallocator or nodes on the instances"
                                   " given and no cluster-wide default"
                                   " iallocator found; please specify either"
                                   " an iallocator or nodes on the instances"
                                   " or set a cluster-wide default iallocator",
                                   errors.ECODE_INVAL)

    _CheckOpportunisticLocking(self.op)

    dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
    if dups:
      raise errors.OpPrereqError("There are duplicate instance names: %s" %
                                 utils.CommaJoin(dups), errors.ECODE_INVAL)

  def ExpandNames(self):
    """Calculate the locks.

    """
    self.share_locks = ShareAll()
    self.needed_locks = {
      # iallocator will select nodes and even if no iallocator is used,
      # collisions with LUInstanceCreate should be avoided
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
      }

    if self.op.iallocator:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
      self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET

      if self.op.opportunistic_locking:
        self.opportunistic_locks[locking.LEVEL_NODE] = True
        self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
    else:
      nodeslist = []
      for inst in self.op.instances:
        (inst.pnode_uuid, inst.pnode) = \
          ExpandNodeUuidAndName(self.cfg, inst.pnode_uuid, inst.pnode)
        nodeslist.append(inst.pnode_uuid)
        if inst.snode is not None:
          (inst.snode_uuid, inst.snode) = \
            ExpandNodeUuidAndName(self.cfg, inst.snode_uuid, inst.snode)
          nodeslist.append(inst.snode_uuid)

      self.needed_locks[locking.LEVEL_NODE] = nodeslist
      # Lock resources of instance's primary and secondary nodes (copy to
      # prevent accidental modification)
      self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)

  def CheckPrereq(self):
    """Check prerequisite.

    """
    if self.op.iallocator:
      cluster = self.cfg.GetClusterInfo()
      default_vg = self.cfg.GetVGName()
      ec_id = self.proc.GetECId()

      if self.op.opportunistic_locking:
        # Only consider nodes for which a lock is held
        node_whitelist = self.cfg.GetNodeNames(
                           list(self.owned_locks(locking.LEVEL_NODE)))
      else:
        node_whitelist = None

      insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
                                           _ComputeNics(op, cluster, None,
                                                        self.cfg, ec_id),
                                           _ComputeFullBeParams(op, cluster),
                                           node_whitelist)
               for op in self.op.instances]

      req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
      ial = iallocator.IAllocator(self.cfg, self.rpc, req)

      ial.Run(self.op.iallocator)

      if not ial.success:
        raise errors.OpPrereqError("Can't compute nodes using"
                                   " iallocator '%s': %s" %
                                   (self.op.iallocator, ial.info),
                                   errors.ECODE_NORES)

      self.ia_result = ial.result

    if self.op.dry_run:
      self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
        constants.JOB_IDS_KEY: [],
        })

  def _ConstructPartialResult(self):
2001

2002
    """
2003
    if self.op.iallocator:
2004
      (allocatable, failed_insts) = self.ia_result
2005
      allocatable_insts = map(compat.fst, allocatable)
2006
    else:
2007
      allocatable_insts = [op.instance_name for op in self.op.instances]
2008
      failed_insts = []
2009

    
2010
    return {
2011
      constants.ALLOCATABLE_KEY: allocatable_insts,
2012
      constants.FAILED_KEY: failed_insts,
2013
      }
2014

    
2015
  def Exec(self, feedback_fn):
2016
    """Executes the opcode.
2017

2018
    """
2019
    jobs = []
2020
    if self.op.iallocator:
2021
      op2inst = dict((op.instance_name, op) for op in self.op.instances)
2022
      (allocatable, failed) = self.ia_result
2023

    
2024
      for (name, node_names) in allocatable:
2025
        op = op2inst.pop(name)
2026

    
2027
        (op.pnode_uuid, op.pnode) = \
2028
          ExpandNodeUuidAndName(self.cfg, None, node_names[0])
2029
        if len(node_names) > 1:
2030
          (op.snode_uuid, op.snode) = \
2031
            ExpandNodeUuidAndName(self.cfg, None, node_names[1])
2032

    
2033
        jobs.append([op])

      missing = set(op2inst.keys()) - set(failed)
      assert not missing, \
        "Iallocator did return incomplete result: %s" % \
        utils.CommaJoin(missing)
    else:
      jobs.extend([op] for op in self.op.instances)

    return ResultWithJobs(jobs, **self._ConstructPartialResult())


class _InstNicModPrivate:
  """Data structure for network interface modifications.

  Used by L{LUInstanceSetParams}.

  """
  def __init__(self):
    self.params = None
    self.filled = None


def _PrepareContainerMods(mods, private_fn):
  """Prepares a list of container modifications by adding a private data field.

  @type mods: list of tuples; (operation, index, parameters)
  @param mods: List of modifications
  @type private_fn: callable or None
  @param private_fn: Callable for constructing a private data field for a
    modification
  @rtype: list

  """
  if private_fn is None:
    fn = lambda: None
  else:
    fn = private_fn

  return [(op, idx, params, fn()) for (op, idx, params) in mods]
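
# Illustrative usage sketch (comment only, not executed): this is how NIC or
# disk modifications are expected to be paired with a fresh private object
# before being applied; the concrete modification below is hypothetical.
#
#   mods = [(constants.DDM_ADD, -1, {constants.INIC_MAC: constants.VALUE_AUTO})]
#   prepared = _PrepareContainerMods(mods, _InstNicModPrivate)
#   # prepared == [(constants.DDM_ADD, -1, {...}, <_InstNicModPrivate object>)]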


def _CheckNodesPhysicalCPUs(lu, node_uuids, requested, hypervisor_specs):
  """Checks if nodes have enough physical CPUs.

  This function checks if all given nodes have the needed number of
  physical CPUs. In case any node has fewer CPUs or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type node_uuids: C{list}
  @param node_uuids: the list of node UUIDs to check
  @type requested: C{int}
  @param requested: the minimum acceptable number of physical CPUs
  @type hypervisor_specs: list of pairs (string, dict of strings)
  @param hypervisor_specs: list of hypervisor specifications in
      pairs (hypervisor_name, hvparams)
  @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
      or we cannot check the node

  """
  nodeinfo = lu.rpc.call_node_info(node_uuids, None, hypervisor_specs)
  for node_uuid in node_uuids:
    info = nodeinfo[node_uuid]
    node_name = lu.cfg.GetNodeName(node_uuid)
    info.Raise("Cannot get current information from node %s" % node_name,
               prereq=True, ecode=errors.ECODE_ENVIRON)
    (_, _, (hv_info, )) = info.payload
    num_cpus = hv_info.get("cpu_total", None)
    if not isinstance(num_cpus, int):
      raise errors.OpPrereqError("Can't compute the number of physical CPUs"
                                 " on node %s, result was '%s'" %
                                 (node_name, num_cpus), errors.ECODE_ENVIRON)
    if requested > num_cpus:
      raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
                                 "required" % (node_name, num_cpus, requested),
                                 errors.ECODE_NORES)


def GetItemFromContainer(identifier, kind, container):
2116

2117
  @type identifier: string
2118
  @param identifier: Item index or name or UUID
2119
  @type kind: string
2120
  @param kind: One-word item description
2121
  @type container: list
2122
  @param container: Container to get the item from
2123

2124
  """
2125
  # Index
2126
  try:
2127
    idx = int(identifier)
2128
    if idx == -1:
2129
      # Append
2130
      absidx = len(container) - 1
2131
    elif idx < 0:
2132
      raise IndexError("Not accepting negative indices other than -1")
2133
    elif idx > len(container):
2134
      raise IndexError("Got %s index %s, but there are only %s" %
2135
                       (kind, idx, len(container)))
2136
    else:
2137
      absidx = idx
2138
    return (absidx, container[idx])
2139
  except ValueError:
2140
    pass
2141

    
2142
  for idx, item in enumerate(container):
2143
    if item.uuid == identifier or item.name == identifier:
2144
      return (idx, item)
2145

    
2146
  raise errors.OpPrereqError("Cannot find %s with identifier %s" %
2147
                             (kind, identifier), errors.ECODE_NOENT)
2148

    
2149

    
2150
def _ApplyContainerMods(kind, container, chgdesc, mods,
2151
                        create_fn, modify_fn, remove_fn,
2152
                        post_add_fn=None):
2153
  """Applies descriptions in C{mods} to C{container}.
2154

2155
  @type kind: string
2156
  @param kind: One-word item description
2157
  @type container: list
2158
  @param container: Container to modify
2159
  @type chgdesc: None or list
2160
  @param chgdesc: List of applied changes
2161
  @type mods: list
2162
  @param mods: Modifications as returned by L{_PrepareContainerMods}
2163
  @type create_fn: callable
2164
  @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
2165
    receives absolute item index, parameters and private data object as added
2166
    by L{_PrepareContainerMods}, returns tuple containing new item and changes
2167
    as list
2168
  @type modify_fn: callable
2169
  @param modify_fn: Callback for modifying an existing item
2170
    (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
2171
    and private data object as added by L{_PrepareContainerMods}, returns
2172
    changes as list
2173
  @type remove_fn: callable
2174
  @param remove_fn: Callback on removing item; receives absolute item index,
2175
    item and private data object as added by L{_PrepareContainerMods}
2176
  @type post_add_fn: callable
2177
  @param post_add_fn: Callable for post-processing a newly created item after
2178
    it has been put into the container. It receives the index of the new item
2179
    and the new item as parameters.
2180

2181
  """
2182
  for (op, identifier, params, private) in mods:
2183
    changes = None
2184

    
2185
    if op == constants.DDM_ADD:
2186
      # Calculate where item will be added
2187
      # When adding an item, identifier can only be an index
2188
      try:
2189
        idx = int(identifier)
2190
      except ValueError:
2191
        raise errors.OpPrereqError("Only possitive integer or -1 is accepted as"
                                   " identifier for %s" % constants.DDM_ADD,
                                   errors.ECODE_INVAL)
      if idx == -1:
        addidx = len(container)
      else:
        if idx < 0:
          raise IndexError("Not accepting negative indices other than -1")
        elif idx > len(container):
          raise IndexError("Got %s index %s, but there are only %s" %
                           (kind, idx, len(container)))
        addidx = idx

      if create_fn is None:
        item = params
      else:
        (item, changes) = create_fn(addidx, params, private)

      if idx == -1:
        container.append(item)
      else:
        assert idx >= 0
        assert idx <= len(container)
        # list.insert does so before the specified index
        container.insert(idx, item)

      if post_add_fn is not None:
        post_add_fn(addidx, item)

    else:
      # Retrieve existing item
      (absidx, item) = GetItemFromContainer(identifier, kind, container)

      if op == constants.DDM_REMOVE:
        assert not params

        changes = [("%s/%s" % (kind, absidx), "remove")]

        if remove_fn is not None:
          msg = remove_fn(absidx, item, private)
          if msg:
            changes.append(("%s/%s" % (kind, absidx), msg))

        assert container[absidx] == item
        del container[absidx]
      elif op == constants.DDM_MODIFY:
        if modify_fn is not None:
          changes = modify_fn(absidx, item, params, private)
      else:
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)

    assert _TApplyContModsCbChanges(changes)

    if not (chgdesc is None or changes is None):
2246

    
2247

    
2248
def _UpdateIvNames(base_index, disks):
2249
  """Updates the C{iv_name} attribute of disks.
2250

2251
  @type disks: list of L{objects.Disk}
2252

2253
  """
2254
  for (idx, disk) in enumerate(disks):
2255
    disk.iv_name = "disk/%s" % (base_index + idx, )
2256

    
2257

    
2258
class LUInstanceSetParams(LogicalUnit):
2259
  """Modifies an instances's parameters.

  """
  HPATH = "instance-modify"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  @staticmethod
  def _UpgradeDiskNicMods(kind, mods, verify_fn):
    assert ht.TList(mods)
    assert not mods or len(mods[0]) in (2, 3)

    if mods and len(mods[0]) == 2:
      result = []

      addremove = 0
      for op, params in mods:
        if op in (constants.DDM_ADD, constants.DDM_REMOVE):
          result.append((op, -1, params))
          addremove += 1

          if addremove > 1:
            raise errors.OpPrereqError("Only one %s add or remove operation is"
                                       " supported at a time" % kind,
                                       errors.ECODE_INVAL)
        else:
          result.append((constants.DDM_MODIFY, op, params))

      assert verify_fn(result)
    else:
      result = mods

    return result

  @staticmethod
  def _CheckMods(kind, mods, key_types, item_fn):
    """Ensures requested disk/NIC modifications are valid.

    """
    for (op, _, params) in mods:
      assert ht.TDict(params)

      # If 'key_types' is an empty dict, we assume we have an
      # 'ext' template and thus do not ForceDictType
      if key_types:
        utils.ForceDictType(params, key_types)

      if op == constants.DDM_REMOVE:
        if params:
          raise errors.OpPrereqError("No settings should be passed when"
                                     " removing a %s" % kind,
                                     errors.ECODE_INVAL)
      elif op in (constants.DDM_ADD, constants.DDM_MODIFY):
        item_fn(op, params)
      else:
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)

  @staticmethod
  def _VerifyDiskModification(op, params, excl_stor):
    """Verifies a disk modification.

    """
    if op == constants.DDM_ADD:
      mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
      if mode not in constants.DISK_ACCESS_SET:
        raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
                                   errors.ECODE_INVAL)

      size = params.get(constants.IDISK_SIZE, None)
      if size is None:
        raise errors.OpPrereqError("Required disk parameter '%s' missing" %
                                   constants.IDISK_SIZE, errors.ECODE_INVAL)
      size = int(size)

      params[constants.IDISK_SIZE] = size
      name = params.get(constants.IDISK_NAME, None)
      if name is not None and name.lower() == constants.VALUE_NONE:
        params[constants.IDISK_NAME] = None

      CheckSpindlesExclusiveStorage(params, excl_stor, True)

    elif op == constants.DDM_MODIFY:
      if constants.IDISK_SIZE in params:
        raise errors.OpPrereqError("Disk size change not possible, use"
                                   " grow-disk", errors.ECODE_INVAL)
      if len(params) > 2:
        raise errors.OpPrereqError("Disk modification doesn't support"
                                   " additional arbitrary parameters",
                                   errors.ECODE_INVAL)
      name = params.get(constants.IDISK_NAME, None)
      if name is not None and name.lower() == constants.VALUE_NONE:
        params[constants.IDISK_NAME] = None

  @staticmethod
  def _VerifyNicModification(op, params):
    """Verifies a network interface modification.

    """
    if op in (constants.DDM_ADD, constants.DDM_MODIFY):
      ip = params.get(constants.INIC_IP, None)
      name = params.get(constants.INIC_NAME, None)
      req_net = params.get(constants.INIC_NETWORK, None)
      link = params.get(constants.NIC_LINK, None)
      mode = params.get(constants.NIC_MODE, None)
      if name is not None and name.lower() == constants.VALUE_NONE:
        params[constants.INIC_NAME] = None
      if req_net is not None:
        if req_net.lower() == constants.VALUE_NONE:
          params[constants.INIC_NETWORK] = None
          req_net = None
        elif link is not None or mode is not None:
          raise errors.OpPrereqError("If network is given"
                                     " mode or link should not",
                                     errors.ECODE_INVAL)

      if op == constants.DDM_ADD:
        macaddr = params.get(constants.INIC_MAC, None)
        if macaddr is None:
          params[constants.INIC_MAC] = constants.VALUE_AUTO

      if ip is not None:
        if ip.lower() == constants.VALUE_NONE:
          params[constants.INIC_IP] = None
        else:
          if ip.lower() == constants.NIC_IP_POOL:
            if op == constants.DDM_ADD and req_net is None:
              raise errors.OpPrereqError("If ip=pool, parameter network"
                                         " cannot be none",
                                         errors.ECODE_INVAL)
          else:
            if not netutils.IPAddress.IsValid(ip):
              raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                         errors.ECODE_INVAL)

      if constants.INIC_MAC in params:
        macaddr = params[constants.INIC_MAC]
        if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
          macaddr = utils.NormalizeAndValidateMac(macaddr)

        if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
                                     " modifying an existing NIC",
                                     errors.ECODE_INVAL)

  def CheckArguments(self):
    if not (self.op.nics or self.op.disks or self.op.disk_template or
            self.op.hvparams or self.op.beparams or self.op.os_name or
            self.op.osparams or self.op.offline is not None or
            self.op.runtime_mem or self.op.pnode):
      raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)

    if self.op.hvparams:
      CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
                           "hypervisor", "instance", "cluster")

    self.op.disks = self._UpgradeDiskNicMods(
      "disk", self.op.disks, ht.TSetParamsMods(ht.TIDiskParams))
    self.op.nics = self._UpgradeDiskNicMods(
      "NIC", self.op.nics, ht.TSetParamsMods(ht.TINicParams))

    if self.op.disks and self.op.disk_template is not None:
      raise errors.OpPrereqError("Disk template conversion and other disk"
                                 " changes not supported at the same time",
                                 errors.ECODE_INVAL)

    if (self.op.disk_template and
        self.op.disk_template in constants.DTS_INT_MIRROR and
        self.op.remote_node is None):
      raise errors.OpPrereqError("Changing the disk template to a mirrored"
                                 " one requires specifying a secondary node",
                                 errors.ECODE_INVAL)

    # Check NIC modifications
    self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
                    self._VerifyNicModification)

    if self.op.pnode:
      (self.op.pnode_uuid, self.op.pnode) = \
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODEGROUP] = []
    # Can't even acquire node locks in shared mode as upcoming changes in
    # Ganeti 2.6 will start to modify the node object on disk conversion
    self.needed_locks[locking.LEVEL_NODE] = []
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
    # Look node group to look up the ipolicy
    self.share_locks[locking.LEVEL_NODEGROUP] = 1

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
      # Acquire locks for the instance's nodegroups optimistically. Needs
      # to be verified in CheckPrereq
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
        self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
    elif level == locking.LEVEL_NODE:
      self._LockInstancesNodes()
      if self.op.disk_template and self.op.remote_node:
        (self.op.remote_node_uuid, self.op.remote_node) = \
          ExpandNodeUuidAndName(self.cfg, self.op.remote_node_uuid,
                                self.op.remote_node)
        self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node_uuid)
    elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, primary and secondaries.

    """
    args = {}
    if constants.BE_MINMEM in self.be_new:
      args["minmem"] = self.be_new[constants.BE_MINMEM]
    if constants.BE_MAXMEM in self.be_new:
      args["maxmem"] = self.be_new[constants.BE_MAXMEM]
    if constants.BE_VCPUS in self.be_new:
      args["vcpus"] = self.be_new[constants.BE_VCPUS]
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
    # information at all.

    if self._new_nics is not None:
      nics = []

      for nic in self._new_nics:
        n = copy.deepcopy(nic)
        nicparams = self.cluster.SimpleFillNIC(n.nicparams)
        n.nicparams = nicparams
        nics.append(NICToTuple(self, n))

      args["nics"] = nics

    env = BuildInstanceHookEnvByObject(self, self.instance, override=args)
    if self.op.disk_template:
      env["NEW_DISK_TEMPLATE"] = self.op.disk_template
    if self.op.runtime_mem:
      env["RUNTIME_MEMORY"] = self.op.runtime_mem

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)

  def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
                              old_params, cluster, pnode_uuid):

    update_params_dict = dict([(key, params[key])
                               for key in constants.NICS_PARAMETERS
                               if key in params])

    req_link = update_params_dict.get(constants.NIC_LINK, None)
    req_mode = update_params_dict.get(constants.NIC_MODE, None)

    new_net_uuid = None
    new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid)
    if new_net_uuid_or_name:
      new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name)
      new_net_obj = self.cfg.GetNetwork(new_net_uuid)

    if old_net_uuid:
      old_net_obj = self.cfg.GetNetwork(old_net_uuid)

    if new_net_uuid:
      netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode_uuid)
      if not netparams:
        raise errors.OpPrereqError("No netparams found for the network"
                                   " %s, probably not connected" %
                                   new_net_obj.name, errors.ECODE_INVAL)
      new_params = dict(netparams)
    else:
      new_params = GetUpdatedParams(old_params, update_params_dict)

    utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)

    new_filled_params = cluster.SimpleFillNIC(new_params)
    objects.NIC.CheckParameterSyntax(new_filled_params)

    new_mode = new_filled_params[constants.NIC_MODE]
    if new_mode == constants.NIC_MODE_BRIDGED:
      bridge = new_filled_params[constants.NIC_LINK]
      msg = self.rpc.call_bridges_exist(pnode_uuid, [bridge]).fail_msg
      if msg:
        msg = "Error checking bridges on node '%s': %s" % \
                (self.cfg.GetNodeName(pnode_uuid), msg)
        if self.op.force:
          self.warn.append(msg)
        else:
          raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)

    elif new_mode == constants.NIC_MODE_ROUTED:
      ip = params.get(constants.INIC_IP, old_ip)
      if ip is None:
        raise errors.OpPrereqError("Cannot set the NIC IP address to None"
                                   " on a routed NIC", errors.ECODE_INVAL)

    elif new_mode == constants.NIC_MODE_OVS:
      # TODO: check OVS link
      self.LogInfo("OVS links are currently not checked for correctness")

    if constants.INIC_MAC in params:
      mac = params[constants.INIC_MAC]
      if mac is None:
        raise errors.OpPrereqError("Cannot unset the NIC MAC address",
                                   errors.ECODE_INVAL)
      elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        # otherwise generate the MAC address
        params[constants.INIC_MAC] = \
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
      else:
        # or validate/reserve the current one
        try:
          self.cfg.ReserveMAC(mac, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("MAC address '%s' already in use"
                                     " in cluster" % mac,
                                     errors.ECODE_NOTUNIQUE)
    elif new_net_uuid != old_net_uuid:

      def get_net_prefix(net_uuid):
        mac_prefix = None
        if net_uuid:
          nobj = self.cfg.GetNetwork(net_uuid)
          mac_prefix = nobj.mac_prefix

        return mac_prefix

      new_prefix = get_net_prefix(new_net_uuid)
      old_prefix = get_net_prefix(old_net_uuid)
      if old_prefix != new_prefix:
        params[constants.INIC_MAC] = \
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())

    # if there is a change in (ip, network) tuple
    new_ip = params.get(constants.INIC_IP, old_ip)
    if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
      if new_ip:
        # if IP is pool then require a network and generate one IP
        if new_ip.lower() == constants.NIC_IP_POOL:
          if new_net_uuid:
            try:
              new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("Unable to get a free IP"
2611
                                         " from the address pool",
2612
                                         errors.ECODE_STATE)
2613
            self.LogInfo("Chose IP %s from network %s",
2614
                         new_ip,
2615
                         new_net_obj.name)
2616
            params[constants.INIC_IP] = new_ip
2617
          else:
2618
            raise errors.OpPrereqError("ip=pool, but no network found",
2619
                                       errors.ECODE_INVAL)
2620
        # Reserve new IP if in the new network if any
2621
        elif new_net_uuid:
2622
          try:
2623
            self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId())
2624
            self.LogInfo("Reserving IP %s in network %s",
2625
                         new_ip, new_net_obj.name)
2626
          except errors.ReservationError:
2627
            raise errors.OpPrereqError("IP %s not available in network %s" %
2628
                                       (new_ip, new_net_obj.name),
2629
                                       errors.ECODE_NOTUNIQUE)
2630
        # new network is None so check if new IP is a conflicting IP
2631
        elif self.op.conflicts_check:
2632
          _CheckForConflictingIp(self, new_ip, pnode_uuid)
2633

    
2634
      # release old IP if old network is not None
2635
      if old_ip and old_net_uuid:
2636
        try:
2637
          self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId())
2638
        except errors.AddressPoolError:
2639
          logging.warning("Release IP %s not contained in network %s",
2640
                          old_ip, old_net_obj.name)
2641

    
2642
    # there are no changes in (ip, network) tuple and old network is not None
2643
    elif (old_net_uuid is not None and
2644
          (req_link is not None or req_mode is not None)):
2645
      raise errors.OpPrereqError("Not allowed to change link or mode of"
2646
                                 " a NIC that is connected to a network",
2647
                                 errors.ECODE_INVAL)
2648

    
2649
    private.params = new_params
2650
    private.filled = new_filled_params
2651

    
2652
  def _PreCheckDiskTemplate(self, pnode_info):
    """CheckPrereq checks related to a new disk template."""
    # Arguments are passed to avoid configuration lookups
    pnode_uuid = self.instance.primary_node
    if self.instance.disk_template == self.op.disk_template:
      raise errors.OpPrereqError("Instance already has disk template %s" %
                                 self.instance.disk_template,
                                 errors.ECODE_INVAL)

    if not self.cluster.IsDiskTemplateEnabled(self.op.disk_template):
      raise errors.OpPrereqError("Disk template '%s' is not enabled for this"
                                 " cluster." % self.op.disk_template)

    if (self.instance.disk_template,
        self.op.disk_template) not in self._DISK_CONVERSIONS:
      raise errors.OpPrereqError("Unsupported disk template conversion from"
                                 " %s to %s" % (self.instance.disk_template,
                                                self.op.disk_template),
                                 errors.ECODE_INVAL)
    CheckInstanceState(self, self.instance, INSTANCE_DOWN,
                       msg="cannot change disk template")
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.remote_node_uuid == pnode_uuid:
        raise errors.OpPrereqError("Given new secondary node %s is the same"
                                   " as the primary node of the instance" %
                                   self.op.remote_node, errors.ECODE_STATE)
      CheckNodeOnline(self, self.op.remote_node_uuid)
      CheckNodeNotDrained(self, self.op.remote_node_uuid)
      # FIXME: here we assume that the old instance type is DT_PLAIN
      assert self.instance.disk_template == constants.DT_PLAIN
      disks = [{constants.IDISK_SIZE: d.size,
                constants.IDISK_VG: d.logical_id[0]}
               for d in self.instance.disks]
      required = ComputeDiskSizePerVG(self.op.disk_template, disks)
      CheckNodesFreeDiskPerVG(self, [self.op.remote_node_uuid], required)

      snode_info = self.cfg.GetNodeInfo(self.op.remote_node_uuid)
      snode_group = self.cfg.GetNodeGroup(snode_info.group)
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
                                                              snode_group)
      CheckTargetNodeIPolicy(self, ipolicy, self.instance, snode_info, self.cfg,
                             ignore=self.op.ignore_ipolicy)
      if pnode_info.group != snode_info.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    if not self.op.disk_template in constants.DTS_EXCL_STORAGE:
      # Make sure none of the nodes require exclusive storage
      nodes = [pnode_info]
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        assert snode_info
        nodes.append(snode_info)
      has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
      if compat.any(map(has_es, nodes)):
        errmsg = ("Cannot convert disk template from %s to %s when exclusive"
                  " storage is enabled" % (self.instance.disk_template,
                                           self.op.disk_template))
        raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)

  def _PreCheckDisks(self, ispec):
    """CheckPrereq checks related to disk changes.

    @type ispec: dict
    @param ispec: instance specs to be updated with the new disks

    """
    self.diskparams = self.cfg.GetInstanceDiskParams(self.instance)

    excl_stor = compat.any(
      rpc.GetExclusiveStorageForNodes(self.cfg,
                                      self.instance.all_nodes).values()
      )

    # Check disk modifications. This is done here and not in CheckArguments
    # (as with NICs), because we need to know the instance's disk template
    ver_fn = lambda op, par: self._VerifyDiskModification(op, par, excl_stor)
    if self.instance.disk_template == constants.DT_EXT:
      self._CheckMods("disk", self.op.disks, {}, ver_fn)
    else:
      self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
                      ver_fn)

    self.diskmod = _PrepareContainerMods(self.op.disks, None)

    # Check the validity of the `provider' parameter
    if self.instance.disk_template in constants.DT_EXT:
      for mod in self.diskmod:
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
        if mod[0] == constants.DDM_ADD:
          if ext_provider is None:
            raise errors.OpPrereqError("Instance template is '%s' and parameter"
                                       " '%s' missing, during disk add" %
                                       (constants.DT_EXT,
                                        constants.IDISK_PROVIDER),
                                       errors.ECODE_NOENT)
        elif mod[0] == constants.DDM_MODIFY:
          if ext_provider:
            raise errors.OpPrereqError("Parameter '%s' is invalid during disk"
                                       " modification" %
                                       constants.IDISK_PROVIDER,
                                       errors.ECODE_INVAL)
    else:
      for mod in self.diskmod:
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
        if ext_provider is not None:
          raise errors.OpPrereqError("Parameter '%s' is only valid for"
                                     " instances of type '%s'" %
                                     (constants.IDISK_PROVIDER,
                                      constants.DT_EXT),
                                     errors.ECODE_INVAL)

    if not self.op.wait_for_sync and self.instance.disks_active:
      for mod in self.diskmod:
        if mod[0] == constants.DDM_ADD:
          raise errors.OpPrereqError("Can't add a disk to an instance with"
                                     " activated disks and"
                                     " --no-wait-for-sync given.",
                                     errors.ECODE_INVAL)

    if self.op.disks and self.instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Disk operations not supported for"
                                 " diskless instances", errors.ECODE_INVAL)

    def _PrepareDiskMod(_, disk, params, __):
      disk.name = params.get(constants.IDISK_NAME, None)

    # Verify disk changes (operating on a copy)
    disks = copy.deepcopy(self.instance.disks)
    _ApplyContainerMods("disk", disks, None, self.diskmod, None,
                        _PrepareDiskMod, None)
    utils.ValidateDeviceNames("disk", disks)
    if len(disks) > constants.MAX_DISKS:
      raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
                                 " more" % constants.MAX_DISKS,
                                 errors.ECODE_STATE)
    disk_sizes = [disk.size for disk in self.instance.disks]
    disk_sizes.extend(params["size"] for (op, idx, params, private) in
                      self.diskmod if op == constants.DDM_ADD)
    ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
    ispec[constants.ISPEC_DISK_SIZE] = disk_sizes

    if self.op.offline is not None and self.op.offline:
      CheckInstanceState(self, self.instance, CAN_CHANGE_INSTANCE_OFFLINE,
                         msg="can't change to offline")

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the instance list against the existing names.

    """
    assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    self.cluster = self.cfg.GetClusterInfo()
    cluster_hvparams = self.cluster.hvparams[self.instance.hypervisor]

    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    pnode_uuid = self.instance.primary_node

    self.warn = []

    if (self.op.pnode_uuid is not None and self.op.pnode_uuid != pnode_uuid and
        not self.op.force):
      # verify that the instance is not up
      instance_info = self.rpc.call_instance_info(
          pnode_uuid, self.instance.name, self.instance.hypervisor,
          cluster_hvparams)
      if instance_info.fail_msg:
        self.warn.append("Can't get instance runtime information: %s" %
                         instance_info.fail_msg)
      elif instance_info.payload:
        raise errors.OpPrereqError("Instance is still running on %s" %
                                   self.cfg.GetNodeName(pnode_uuid),
                                   errors.ECODE_STATE)

    assert pnode_uuid in self.owned_locks(locking.LEVEL_NODE)
    node_uuids = list(self.instance.all_nodes)
    pnode_info = self.cfg.GetNodeInfo(pnode_uuid)

    #_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
    assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
    group_info = self.cfg.GetNodeGroup(pnode_info.group)

    # dictionary with instance information after the modification
    ispec = {}

    if self.op.hotplug:
      result = self.rpc.call_hotplug_supported(self.instance.primary_node,
                                               self.instance)
      result.Raise("Hotplug is not supported.")

    # Prepare NIC modifications
    self.nicmod = _PrepareContainerMods(self.op.nics, _InstNicModPrivate)

    # OS change
    if self.op.os_name and not self.op.force:
      CheckNodeHasOS(self, self.instance.primary_node, self.op.os_name,
                     self.op.force_variant)
      instance_os = self.op.os_name
    else:
      instance_os = self.instance.os

    assert not (self.op.disk_template and self.op.disks), \
      "Can't modify disk template and apply disk changes at the same time"

    if self.op.disk_template:
      self._PreCheckDiskTemplate(pnode_info)

    self._PreCheckDisks(ispec)

    # hvparams processing
    if self.op.hvparams:
      hv_type = self.instance.hypervisor
      i_hvdict = GetUpdatedParams(self.instance.hvparams, self.op.hvparams)
      utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
      hv_new = self.cluster.SimpleFillHV(hv_type, self.instance.os, i_hvdict)

      # local check
      hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
      CheckHVParams(self, node_uuids, self.instance.hypervisor, hv_new)
      self.hv_proposed = self.hv_new = hv_new # the new actual values
      self.hv_inst = i_hvdict # the new dict (without defaults)
    else:
      self.hv_proposed = self.cluster.SimpleFillHV(self.instance.hypervisor,
                                                   self.instance.os,
                                                   self.instance.hvparams)
      self.hv_new = self.hv_inst = {}

    # beparams processing
    if self.op.beparams:
      i_bedict = GetUpdatedParams(self.instance.beparams, self.op.beparams,
                                  use_none=True)
      objects.UpgradeBeParams(i_bedict)
      utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
      be_new = self.cluster.SimpleFillBE(i_bedict)
      self.be_proposed = self.be_new = be_new # the new actual values
      self.be_inst = i_bedict # the new dict (without defaults)
    else:
      self.be_new = self.be_inst = {}
      self.be_proposed = self.cluster.SimpleFillBE(self.instance.beparams)
    be_old = self.cluster.FillBE(self.instance)

    # CPU param validation -- checking every time a parameter is
    # changed to cover all cases where either CPU mask or vcpus have
    # changed
    if (constants.BE_VCPUS in self.be_proposed and
        constants.HV_CPU_MASK in self.hv_proposed):
      cpu_list = \
        utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
      # Verify mask is consistent with number of vCPUs. Can skip this
      # test if only 1 entry in the CPU mask, which means same mask
      # is applied to all vCPUs.
      if (len(cpu_list) > 1 and
          len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
        raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
                                   " CPU mask [%s]" %
                                   (self.be_proposed[constants.BE_VCPUS],
                                    self.hv_proposed[constants.HV_CPU_MASK]),
                                   errors.ECODE_INVAL)

      # Only perform this test if a new CPU mask is given
      if constants.HV_CPU_MASK in self.hv_new:
        # Calculate the largest CPU number requested
        max_requested_cpu = max(map(max, cpu_list))
        # Check that all of the instance's nodes have enough physical CPUs to
        # satisfy the requested CPU mask
        hvspecs = [(self.instance.hypervisor,
                    self.cfg.GetClusterInfo()
                      .hvparams[self.instance.hypervisor])]
        _CheckNodesPhysicalCPUs(self, self.instance.all_nodes,
                                max_requested_cpu + 1,
                                hvspecs)

    # osparams processing
    if self.op.osparams:
      i_osdict = GetUpdatedParams(self.instance.osparams, self.op.osparams)
      CheckOSParams(self, True, node_uuids, instance_os, i_osdict)
      self.os_inst = i_osdict # the new dict (without defaults)
    else:
      self.os_inst = {}

    #TODO(dynmem): do the appropriate check involving MINMEM
    if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
        be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
      mem_check_list = [pnode_uuid]
      if be_new[constants.BE_AUTO_BALANCE]:
        # either we changed auto_balance to yes or it was from before
        mem_check_list.extend(self.instance.secondary_nodes)
      instance_info = self.rpc.call_instance_info(
          pnode_uuid, self.instance.name, self.instance.hypervisor,
          cluster_hvparams)
      hvspecs = [(self.instance.hypervisor,
                  cluster_hvparams)]
      nodeinfo = self.rpc.call_node_info(mem_check_list, None,
                                         hvspecs)
      pninfo = nodeinfo[pnode_uuid]
      msg = pninfo.fail_msg
      if msg:
        # Assume the primary node is unreachable and go ahead
        self.warn.append("Can't get info from primary node %s: %s" %
                         (self.cfg.GetNodeName(pnode_uuid), msg))
      else:
        (_, _, (pnhvinfo, )) = pninfo.payload
        if not isinstance(pnhvinfo.get("memory_free", None), int):
          self.warn.append("Node data from primary node %s doesn't contain"
                           " free memory information" %
                           self.cfg.GetNodeName(pnode_uuid))
        elif instance_info.fail_msg:
          self.warn.append("Can't get instance runtime information: %s" %
                           instance_info.fail_msg)
        else:
          if instance_info.payload:
            current_mem = int(instance_info.payload["memory"])
          else:
            # Assume instance not running
            # (there is a slight race condition here, but it's not very
            # probable, and we have no other way to check)
            # TODO: Describe race condition
            current_mem = 0
          #TODO(dynmem): do the appropriate check involving MINMEM
          miss_mem = (be_new[constants.BE_MAXMEM] - current_mem -
                      pnhvinfo["memory_free"])
          if miss_mem > 0:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from starting, due to %d MB of memory"
                                       " missing on its primary node" %
                                       miss_mem, errors.ECODE_NORES)

      if be_new[constants.BE_AUTO_BALANCE]:
        for node_uuid, nres in nodeinfo.items():
          if node_uuid not in self.instance.secondary_nodes:
            continue
          nres.Raise("Can't get info from secondary node %s" %
                     self.cfg.GetNodeName(node_uuid), prereq=True,
                     ecode=errors.ECODE_STATE)
          (_, _, (nhvinfo, )) = nres.payload
          if not isinstance(nhvinfo.get("memory_free", None), int):
            raise errors.OpPrereqError("Secondary node %s didn't return free"
                                       " memory information" %
                                       self.cfg.GetNodeName(node_uuid),
                                       errors.ECODE_STATE)
          #TODO(dynmem): do the appropriate check involving MINMEM
          elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from failover to its secondary node"
                                       " %s, due to not enough memory" %
                                       self.cfg.GetNodeName(node_uuid),
                                       errors.ECODE_STATE)

    if self.op.runtime_mem:
      remote_info = self.rpc.call_instance_info(
         self.instance.primary_node, self.instance.name,
         self.instance.hypervisor,
         cluster_hvparams)
      remote_info.Raise("Error checking node %s" %
                        self.cfg.GetNodeName(self.instance.primary_node))
      if not remote_info.payload: # not running already
        raise errors.OpPrereqError("Instance %s is not running" %
                                   self.instance.name, errors.ECODE_STATE)

      current_memory = remote_info.payload["memory"]
      if (not self.op.force and
           (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
            self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
        raise errors.OpPrereqError("Instance %s must have memory between %d"
                                   " and %d MB of memory unless --force is"
                                   " given" %
                                   (self.instance.name,
                                    self.be_proposed[constants.BE_MINMEM],
                                    self.be_proposed[constants.BE_MAXMEM]),
                                   errors.ECODE_INVAL)

      delta = self.op.runtime_mem - current_memory
      if delta > 0:
        CheckNodeFreeMemory(
            self, self.instance.primary_node,
            "ballooning memory for instance %s" % self.instance.name, delta,
            self.instance.hypervisor,
            self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])

    # make self.cluster visible in the functions below
    cluster = self.cluster

    def _PrepareNicCreate(_, params, private):
      self._PrepareNicModification(params, private, None, None,
                                   {}, cluster, pnode_uuid)
      return (None, None)

    def _PrepareNicMod(_, nic, params, private):
      self._PrepareNicModification(params, private, nic.ip, nic.network,
                                   nic.nicparams, cluster, pnode_uuid)
      return None

    def _PrepareNicRemove(_, params, __):
      ip = params.ip
      net = params.network
      if net is not None and ip is not None:
        self.cfg.ReleaseIp(net, ip, self.proc.GetECId())

    # Verify NIC changes (operating on copy)
    nics = self.instance.nics[:]
    _ApplyContainerMods("NIC", nics, None, self.nicmod,
                        _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
    if len(nics) > constants.MAX_NICS:
      raise errors.OpPrereqError("Instance has too many network interfaces"
                                 " (%d), cannot add more" % constants.MAX_NICS,
                                 errors.ECODE_STATE)

    # Pre-compute NIC changes (necessary to use result in hooks)
    self._nic_chgdesc = []
    if self.nicmod:
      # Operate on copies as this is still in prereq
      nics = [nic.Copy() for nic in self.instance.nics]
      _ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
                          self._CreateNewNic, self._ApplyNicMods,
                          self._RemoveNic)
      # Verify that NIC names are unique and valid
      utils.ValidateDeviceNames("NIC", nics)
      self._new_nics = nics
      ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
    else:
      self._new_nics = None
      ispec[constants.ISPEC_NIC_COUNT] = len(self.instance.nics)

    if not self.op.ignore_ipolicy:
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
                                                              group_info)

      # Fill ispec with backend parameters
      ispec[constants.ISPEC_SPINDLE_USE] = \
        self.be_new.get(constants.BE_SPINDLE_USE, None)
      ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS,
                                                         None)

      # Copy ispec to verify parameters with min/max values separately
      if self.op.disk_template:
        new_disk_template = self.op.disk_template
      else:
        new_disk_template = self.instance.disk_template
      ispec_max = ispec.copy()
      ispec_max[constants.ISPEC_MEM_SIZE] = \
        self.be_new.get(constants.BE_MAXMEM, None)
      res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,
                                                     new_disk_template)
      ispec_min = ispec.copy()
      ispec_min[constants.ISPEC_MEM_SIZE] = \
        self.be_new.get(constants.BE_MINMEM, None)
      res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,
                                                     new_disk_template)

      if (res_max or res_min):
        # FIXME: Improve error message by including information about whether
        # the upper or lower limit of the parameter fails the ipolicy.
        msg = ("Instance allocation to group %s (%s) violates policy: %s" %
               (group_info, group_info.name,
                utils.CommaJoin(set(res_max + res_min))))
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

  def _ConvertPlainToDrbd(self, feedback_fn):
    """Converts an instance from plain to drbd.

    """
    feedback_fn("Converting template to drbd")
    pnode_uuid = self.instance.primary_node
    snode_uuid = self.op.remote_node_uuid

    assert self.instance.disk_template == constants.DT_PLAIN

    # create a fake disk info for _GenerateDiskTemplate
    disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
                  constants.IDISK_VG: d.logical_id[0],
                  constants.IDISK_NAME: d.name}
                 for d in self.instance.disks]
    new_disks = GenerateDiskTemplate(self, self.op.disk_template,
                                     self.instance.uuid, pnode_uuid,
                                     [snode_uuid], disk_info, None, None, 0,
                                     feedback_fn, self.diskparams)
    anno_disks = rpc.AnnotateDiskParams(new_disks, self.diskparams)
    p_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, pnode_uuid)
    s_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, snode_uuid)
    info = GetInstanceInfoText(self.instance)
    feedback_fn("Creating additional volumes...")
    # first, create the missing data and meta devices
    for disk in anno_disks:
      # unfortunately this is... not too nice
      CreateSingleBlockDev(self, pnode_uuid, self.instance, disk.children[1],
                           info, True, p_excl_stor)
      for child in disk.children:
        CreateSingleBlockDev(self, snode_uuid, self.instance, child, info, True,
                             s_excl_stor)
    # at this stage, all new LVs have been created, we can rename the
    # old ones
    feedback_fn("Renaming original volumes...")
    rename_list = [(o, n.children[0].logical_id)
                   for (o, n) in zip(self.instance.disks, new_disks)]
    result = self.rpc.call_blockdev_rename(pnode_uuid, rename_list)
    result.Raise("Failed to rename original LVs")

    feedback_fn("Initializing DRBD devices...")
    # all child devices are in place, we can now create the DRBD devices
    try:
      for disk in anno_disks:
        for (node_uuid, excl_stor) in [(pnode_uuid, p_excl_stor),
                                       (snode_uuid, s_excl_stor)]:
          f_create = node_uuid == pnode_uuid
          CreateSingleBlockDev(self, node_uuid, self.instance, disk, info,
                               f_create, excl_stor)
    except errors.GenericError, e:
      feedback_fn("Initializing of DRBD devices failed;"
                  " renaming back original volumes...")
      rename_back_list = [(n.children[0], o.logical_id)
                          for (n, o) in zip(new_disks, self.instance.disks)]
      result = self.rpc.call_blockdev_rename(pnode_uuid, rename_back_list)
      result.Raise("Failed to rename LVs back after error %s" % str(e))
      raise

    # at this point, the instance has been modified
    self.instance.disk_template = constants.DT_DRBD8
    self.instance.disks = new_disks
    self.cfg.Update(self.instance, feedback_fn)

    # Release node locks while waiting for sync
    ReleaseLocks(self, locking.LEVEL_NODE)

    # disks are created, waiting for sync
    disk_abort = not WaitForSync(self, self.instance,
                                 oneshot=not self.op.wait_for_sync)
    if disk_abort:
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance, please cleanup manually")

    # Node resource locks will be released by caller

  def _ConvertDrbdToPlain(self, feedback_fn):
    """Converts an instance from drbd to plain.

    """
    assert len(self.instance.secondary_nodes) == 1
    assert self.instance.disk_template == constants.DT_DRBD8

    pnode_uuid = self.instance.primary_node
    snode_uuid = self.instance.secondary_nodes[0]
    feedback_fn("Converting template to plain")

    old_disks = AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
    new_disks = [d.children[0] for d in self.instance.disks]

    # copy over size, mode and name
    for parent, child in zip(old_disks, new_disks):
      child.size = parent.size
      child.mode = parent.mode
      child.name = parent.name

    # this is a DRBD disk, return its port to the pool
    # NOTE: this must be done right before the call to cfg.Update!
    for disk in old_disks:
      tcp_port = disk.logical_id[2]
      self.cfg.AddTcpUdpPort(tcp_port)

    # update instance structure
    self.instance.disks = new_disks
    self.instance.disk_template = constants.DT_PLAIN
    _UpdateIvNames(0, self.instance.disks)
    self.cfg.Update(self.instance, feedback_fn)

    # Release locks in case removing disks takes a while
    ReleaseLocks(self, locking.LEVEL_NODE)

    feedback_fn("Removing volumes on the secondary node...")
    for disk in old_disks:
      result = self.rpc.call_blockdev_remove(snode_uuid, (disk, self.instance))
      result.Warn("Could not remove block device %s on node %s,"
                  " continuing anyway" %
                  (disk.iv_name, self.cfg.GetNodeName(snode_uuid)),
                  self.LogWarning)

    feedback_fn("Removing unneeded volumes on the primary node...")
    for idx, disk in enumerate(old_disks):
      meta = disk.children[1]
      result = self.rpc.call_blockdev_remove(pnode_uuid, (meta, self.instance))
      result.Warn("Could not remove metadata for disk %d on node %s,"
                  " continuing anyway" %
                  (idx, self.cfg.GetNodeName(pnode_uuid)),
                  self.LogWarning)

  def _HotplugDevice(self, action, dev_type, device, extra, seq):
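    """Performs the hotplug RPC for a single device and reports the outcome.

    """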
    self.LogInfo("Trying to hotplug device...")
    msg = "hotplug:"
    result = self.rpc.call_hotplug_device(self.instance.primary_node,
                                          self.instance, action, dev_type,
                                          (device, self.instance),
                                          extra, seq)
    if result.fail_msg:
      self.LogWarning("Could not hotplug device: %s" % result.fail_msg)
      self.LogInfo("Continuing execution..")
      msg += "failed"
    else:
      self.LogInfo("Hotplug done.")
      msg += "done"
    return msg

  def _CreateNewDisk(self, idx, params, _):
    """Creates a new disk.

    """
    # add a new disk
    if self.instance.disk_template in constants.DTS_FILEBASED:
      (file_driver, file_path) = self.instance.disks[0].logical_id
      file_path = os.path.dirname(file_path)
    else:
      file_driver = file_path = None

    disk = \
      GenerateDiskTemplate(self, self.instance.disk_template,
                           self.instance.uuid, self.instance.primary_node,
                           self.instance.secondary_nodes, [params], file_path,
                           file_driver, idx, self.Log, self.diskparams)[0]

    new_disks = CreateDisks(self, self.instance, disks=[disk])

    if self.cluster.prealloc_wipe_disks:
      # Wipe new disk
      WipeOrCleanupDisks(self, self.instance,
                         disks=[(idx, disk, 0)],
                         cleanup=new_disks)

    changes = [
      ("disk/%d" % idx,
       "add:size=%s,mode=%s" % (disk.size, disk.mode)),
      ]
    if self.op.hotplug:
      result = self.rpc.call_blockdev_assemble(self.instance.primary_node,
                                               (disk, self.instance),
                                               self.instance.name, True, idx)
      if result.fail_msg:
        changes.append(("disk/%d" % idx, "assemble:failed"))
        self.LogWarning("Can't assemble newly created disk %d: %s",
                        idx, result.fail_msg)
      else:
        _, link_name = result.payload
        msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
                                  constants.HOTPLUG_TARGET_DISK,
                                  disk, link_name, idx)
        changes.append(("disk/%d" % idx, msg))

    return (disk, changes)

  def _PostAddDisk(self, _, disk):
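    """Waits for a newly added disk to sync and deactivates it if needed.

    """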
    if not WaitForSync(self, self.instance, disks=[disk],
                       oneshot=not self.op.wait_for_sync):
      raise errors.OpExecError("Failed to sync disks of %s" %
                               self.instance.name)

    # the disk is active at this point, so deactivate it if the instance disks
    # are supposed to be inactive
    if not self.instance.disks_active:
      ShutdownInstanceDisks(self, self.instance, disks=[disk])

  @staticmethod
  def _ModifyDisk(idx, disk, params, _):
    """Modifies a disk.

    """
    changes = []
    mode = params.get(constants.IDISK_MODE, None)
    if mode:
      disk.mode = mode
      changes.append(("disk.mode/%d" % idx, disk.mode))

    name = params.get(constants.IDISK_NAME, None)
    disk.name = name
    changes.append(("disk.name/%d" % idx, disk.name))

    return changes

  def _RemoveDisk(self, idx, root, _):
    """Removes a disk.

    """
    hotmsg = ""
    if self.op.hotplug:
      hotmsg = self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
                                   constants.HOTPLUG_TARGET_DISK,
                                   root, None, idx)
      ShutdownInstanceDisks(self, self.instance, [root])

    (anno_disk,) = AnnotateDiskParams(self.instance, [root], self.cfg)
    for node_uuid, disk in anno_disk.ComputeNodeTree(
                             self.instance.primary_node):
      msg = self.rpc.call_blockdev_remove(node_uuid, (disk, self.instance)) \
              .fail_msg
      if msg:
        self.LogWarning("Could not remove disk/%d on node '%s': %s,"
                        " continuing anyway", idx,
                        self.cfg.GetNodeName(node_uuid), msg)

    # if this is a DRBD disk, return its port to the pool
    if root.dev_type in constants.DTS_DRBD:
      self.cfg.AddTcpUdpPort(root.logical_id[2])

    return hotmsg

  def _CreateNewNic(self, idx, params, private):
    """Creates data structure for a new network interface.

    """
    mac = params[constants.INIC_MAC]
    ip = params.get(constants.INIC_IP, None)
    net = params.get(constants.INIC_NETWORK, None)
    name = params.get(constants.INIC_NAME, None)
    net_uuid = self.cfg.LookupNetwork(net)
    #TODO: not private.filled?? can a nic have no nicparams??
    nicparams = private.filled
    nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name,
                       nicparams=nicparams)
    nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())

    changes = [
      ("nic.%d" % idx,
       "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
       (mac, ip, private.filled[constants.NIC_MODE],
       private.filled[constants.NIC_LINK], net)),
      ]

    if self.op.hotplug:
      msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
                                constants.HOTPLUG_TARGET_NIC,
                                nobj, None, idx)
      changes.append(("nic.%d" % idx, msg))

    return (nobj, changes)

  def _ApplyNicMods(self, idx, nic, params, private):
    """Modifies a network interface.

    """
    changes = []

    for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:
      if key in params:
        changes.append(("nic.%s/%d" % (key, idx), params[key]))
        setattr(nic, key, params[key])

    new_net = params.get(constants.INIC_NETWORK, nic.network)
    new_net_uuid = self.cfg.LookupNetwork(new_net)
    if new_net_uuid != nic.network:
      changes.append(("nic.network/%d" % idx, new_net))
      nic.network = new_net_uuid

    if private.filled:
      nic.nicparams = private.filled

      for (key, val) in nic.nicparams.items():
        changes.append(("nic.%s/%d" % (key, idx), val))

    if self.op.hotplug:
      msg = self._HotplugDevice(constants.HOTPLUG_ACTION_MODIFY,
                                constants.HOTPLUG_TARGET_NIC,
                                nic, None, idx)
      changes.append(("nic/%d" % idx, msg))

    return changes

  def _RemoveNic(self, idx, nic, _):
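    """Removes a network interface, hot-unplugging it if requested.

    """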
    if self.op.hotplug:
      return self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
                                 constants.HOTPLUG_TARGET_NIC,
                                 nic, None, idx)

  def Exec(self, feedback_fn):
    """Modifies an instance.

    All parameters take effect only at the next restart of the instance.

    """
    # Process here the warnings from CheckPrereq, as we don't have a
    # feedback_fn there.
    # TODO: Replace with self.LogWarning
    for warn in self.warn:
      feedback_fn("WARNING: %s" % warn)

    assert ((self.op.disk_template is None) ^
            bool(self.owned_locks(locking.LEVEL_NODE_RES))), \
      "Not owning any node resource locks"

    result = []

    # New primary node
    if self.op.pnode_uuid:
      self.instance.primary_node = self.op.pnode_uuid

    # runtime memory
    if self.op.runtime_mem:
      rpcres = self.rpc.call_instance_balloon_memory(self.instance.primary_node,
                                                     self.instance,
                                                     self.op.runtime_mem)
      rpcres.Raise("Cannot modify instance runtime memory")
      result.append(("runtime_memory", self.op.runtime_mem))

    # Apply disk changes
    _ApplyContainerMods("disk", self.instance.disks, result, self.diskmod,
                        self._CreateNewDisk, self._ModifyDisk,
                        self._RemoveDisk, post_add_fn=self._PostAddDisk)
    _UpdateIvNames(0, self.instance.disks)

    if self.op.disk_template:
      if __debug__:
        check_nodes = set(self.instance.all_nodes)
        if self.op.remote_node_uuid:
          check_nodes.add(self.op.remote_node_uuid)
        for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
          owned = self.owned_locks(level)
          assert not (check_nodes - owned), \
            ("Not owning the correct locks, owning %r, expected at least %r" %
             (owned, check_nodes))

      r_shut = ShutdownInstanceDisks(self, self.instance)
      if not r_shut:
        raise errors.OpExecError("Cannot shutdown instance disks, unable to"
                                 " proceed with disk template conversion")
      mode = (self.instance.disk_template, self.op.disk_template)
      try:
        self._DISK_CONVERSIONS[mode](self, feedback_fn)
      except:
        self.cfg.ReleaseDRBDMinors(self.instance.uuid)
        raise
      result.append(("disk_template", self.op.disk_template))

      assert self.instance.disk_template == self.op.disk_template, \
        ("Expected disk template '%s', found '%s'" %
         (self.op.disk_template, self.instance.disk_template))

    # Release node and resource locks if there are any (they might already have
    # been released during disk conversion)
    ReleaseLocks(self, locking.LEVEL_NODE)
    ReleaseLocks(self, locking.LEVEL_NODE_RES)

    # Apply NIC changes
    if self._new_nics is not None:
      self.instance.nics = self._new_nics
      result.extend(self._nic_chgdesc)

    # hvparams changes
    if self.op.hvparams:
      self.instance.hvparams = self.hv_inst
      for key, val in self.op.hvparams.iteritems():
        result.append(("hv/%s" % key, val))

    # beparams changes
    if self.op.beparams:
      self.instance.beparams = self.be_inst
      for key, val in self.op.beparams.iteritems():
        result.append(("be/%s" % key, val))

    # OS change
    if self.op.os_name:
      self.instance.os = self.op.os_name

    # osparams changes
    if self.op.osparams:
      self.instance.osparams = self.os_inst
      for key, val in self.op.osparams.iteritems():
        result.append(("os/%s" % key, val))

    if self.op.offline is None:
      # Ignore
      pass
    elif self.op.offline:
      # Mark instance as offline
      self.cfg.MarkInstanceOffline(self.instance.uuid)
      result.append(("admin_state", constants.ADMINST_OFFLINE))
    else:
      # Mark instance as online, but stopped
      self.cfg.MarkInstanceDown(self.instance.uuid)
      result.append(("admin_state", constants.ADMINST_DOWN))

    self.cfg.Update(self.instance, feedback_fn, self.proc.GetECId())

    assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
                self.owned_locks(locking.LEVEL_NODE)), \
      "All node locks should have been released by now"

    return result

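  # Supported disk template conversions and the methods implementing them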
  _DISK_CONVERSIONS = {
    (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
    (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
    }


class LUInstanceChangeGroup(LogicalUnit):
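  """Change the node group of an instance.

  """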
  HPATH = "instance-change-group"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self.share_locks = ShareAll()

    self.needed_locks = {
      locking.LEVEL_NODEGROUP: [],
      locking.LEVEL_NODE: [],
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
      }

    self._ExpandAndLockInstance()

    if self.op.target_groups:
      self.req_target_uuids = map(self.cfg.LookupNodeGroup,
                                  self.op.target_groups)
    else:
      self.req_target_uuids = None

    self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]

      if self.req_target_uuids:
        lock_groups = set(self.req_target_uuids)

        # Lock all groups used by instance optimistically; this requires going
        # via the node before it's locked, requiring verification later on
        instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
        lock_groups.update(instance_groups)
      else:
        # No target groups, need to lock all of them
        lock_groups = locking.ALL_SET

      self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups

    elif level == locking.LEVEL_NODE:
      if self.req_target_uuids:
        # Lock all nodes used by instances
        self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
        self._LockInstancesNodes()

        # Lock all nodes in all potential target groups
        lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
                       self.cfg.GetInstanceNodeGroups(self.op.instance_uuid))
        member_nodes = [node_uuid
                        for group in lock_groups
                        for node_uuid in self.cfg.GetNodeGroup(group).members]
        self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
      else:
        # Lock all nodes as all groups are potential targets
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    owned_instance_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))

    assert (self.req_target_uuids is None or
            owned_groups.issuperset(self.req_target_uuids))
    assert owned_instance_names == set([self.op.instance_name])

    # Get instance information
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)

    # Check if node groups for locked instance are still correct
    assert owned_nodes.issuperset(self.instance.all_nodes), \
      ("Instance %s's nodes changed while we kept the lock" %
       self.op.instance_name)

    inst_groups = CheckInstanceNodeGroups(self.cfg, self.op.instance_uuid,
                                          owned_groups)

    if self.req_target_uuids:
      # User requested specific target groups
      self.target_uuids = frozenset(self.req_target_uuids)
    else:
      # All groups except those used by the instance are potential targets
      self.target_uuids = owned_groups - inst_groups

    conflicting_groups = self.target_uuids & inst_groups
    if conflicting_groups:
      raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
                                 " used by the instance '%s'" %
                                 (utils.CommaJoin(conflicting_groups),
                                  self.op.instance_name),
                                 errors.ECODE_INVAL)

    if not self.target_uuids:
      raise errors.OpPrereqError("There are no possible target groups",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    assert self.target_uuids

    env = {
      "TARGET_GROUPS": " ".join(self.target_uuids),
      }

    env.update(BuildInstanceHookEnvByObject(self, self.instance))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()
    return ([mn], [mn])

  def Exec(self, feedback_fn):
    instances = list(self.owned_locks(locking.LEVEL_INSTANCE))

    assert instances == [self.op.instance_name], "Instance not locked"

    req = iallocator.IAReqGroupChange(instances=instances,
                                      target_groups=list(self.target_uuids))
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute solution for changing group of"
                                 " instance '%s' using iallocator '%s': %s" %
                                 (self.op.instance_name, self.op.iallocator,
                                  ial.info), errors.ECODE_NORES)

    jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False)

    self.LogInfo("Iallocator returned %s job(s) for changing group of"
                 " instance '%s'", len(jobs), self.op.instance_name)

    return ResultWithJobs(jobs)