lib/cmdlib/instance.py @ 8106dd64

#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Logical units dealing with instances."""

import OpenSSL
import copy
import logging
import os

from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import ht
from ganeti import hypervisor
from ganeti import locking
from ganeti.masterd import iallocator
from ganeti import masterd
from ganeti import netutils
from ganeti import objects
from ganeti import pathutils
from ganeti import rpc
from ganeti import utils

from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs

from ganeti.cmdlib.common import INSTANCE_DOWN, \
  INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, CheckNodeOnline, \
  ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
  LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
  IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
  AnnotateDiskParams, GetUpdatedParams, ExpandInstanceUuidAndName, \
  ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeUuidAndName, \
  CheckDiskTemplateEnabled, IsValidDiskAccessModeCombination
from ganeti.cmdlib.instance_storage import CreateDisks, \
  CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, WaitForSync, \
  IsExclusiveStorageEnabledNodeUuid, CreateSingleBlockDev, ComputeDisks, \
  CheckRADOSFreeSpace, ComputeDiskSizePerVG, GenerateDiskTemplate, \
  StartInstanceDisks, ShutdownInstanceDisks, AssembleInstanceDisks, \
  CheckSpindlesExclusiveStorage
from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
  GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
  NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
  ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
  GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
  CheckInstanceBridgesExist, CheckNicsBridgesExist, CheckNodeHasOS

import ganeti.masterd.instance


#: Type description for changes as returned by L{_ApplyContainerMods}'s
#: callbacks
_TApplyContModsCbChanges = \
  ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
    ht.TNonEmptyString,
    ht.TAny,
    ])))


def _CheckHostnameSane(lu, name):
  """Ensures that a given hostname resolves to a 'sane' name.

  The given name is required to be a prefix of the resolved hostname,
  to prevent accidental mismatches.

  @param lu: the logical unit on behalf of which we're checking
  @param name: the name we should resolve and check
  @return: the resolved hostname object

  """
  hostname = netutils.GetHostname(name=name)
  if hostname.name != name:
    lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name)
  if not utils.MatchNameComponent(name, [hostname.name]):
    raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
                                " same as given hostname '%s'") %
                               (hostname.name, name), errors.ECODE_INVAL)
  return hostname
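
# Illustrative sketch (not part of the upstream module): how the prefix rule
# above plays out. Assuming the resolver expands short names to an example
# domain, a call such as
#   hostname = _CheckHostnameSane(lu, "web1")
# succeeds if "web1" resolves to e.g. "web1.example.com" (the given name is a
# component prefix of the resolved name), while a resolution to
# "mail.example.com" would raise OpPrereqError with ECODE_INVAL.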


def _CheckOpportunisticLocking(op):
  """Generate error if opportunistic locking is not possible.

  """
  if op.opportunistic_locking and not op.iallocator:
    raise errors.OpPrereqError("Opportunistic locking is only available in"
                               " combination with an instance allocator",
                               errors.ECODE_INVAL)


def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_name_whitelist):
  """Wrapper around IAReqInstanceAlloc.

  @param op: The instance opcode
  @param disks: The computed disks
  @param nics: The computed nics
  @param beparams: The fully filled beparams
  @param node_name_whitelist: List of nodes which should appear as online to the
    allocator (unless the node is already marked offline)

  @returns: A filled L{iallocator.IAReqInstanceAlloc}

  """
  spindle_use = beparams[constants.BE_SPINDLE_USE]
  return iallocator.IAReqInstanceAlloc(name=op.instance_name,
                                       disk_template=op.disk_template,
                                       tags=op.tags,
                                       os=op.os_type,
                                       vcpus=beparams[constants.BE_VCPUS],
                                       memory=beparams[constants.BE_MAXMEM],
                                       spindle_use=spindle_use,
                                       disks=disks,
                                       nics=[n.ToDict() for n in nics],
                                       hypervisor=op.hypervisor,
                                       node_whitelist=node_name_whitelist)
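
# Illustrative sketch (hypothetical values, not part of the upstream module):
# the request built above only needs the already-computed pieces, e.g.
#   req = _CreateInstanceAllocRequest(op, disks, nics, be_full,
#                                     node_name_whitelist=["node1", "node2"])
#   ial = iallocator.IAllocator(cfg, rpc_runner, req)
#   ial.Run(op.iallocator)
# where be_full must already contain BE_VCPUS, BE_MAXMEM and BE_SPINDLE_USE
# (see _ComputeFullBeParams below); this mirrors how _RunAllocator uses it.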


def _ComputeFullBeParams(op, cluster):
  """Computes the full beparams.

  @param op: The instance opcode
  @param cluster: The cluster config object

  @return: The fully filled beparams

  """
  default_beparams = cluster.beparams[constants.PP_DEFAULT]
  for param, value in op.beparams.iteritems():
    if value == constants.VALUE_AUTO:
      op.beparams[param] = default_beparams[param]
  objects.UpgradeBeParams(op.beparams)
  utils.ForceDictType(op.beparams, constants.BES_PARAMETER_TYPES)
  return cluster.SimpleFillBE(op.beparams)
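
# Illustrative sketch (hypothetical values): with cluster defaults of, say,
# {"vcpus": 1, "maxmem": 128} and an opcode carrying
# {"vcpus": "auto", "maxmem": 512}, the loop above replaces "auto" with the
# cluster default, so the returned dict would contain vcpus=1 and maxmem=512,
# plus every remaining default filled in by SimpleFillBE().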


def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
  """Computes the nics.

  @param op: The instance opcode
  @param cluster: Cluster configuration object
  @param default_ip: The default ip to assign
  @param cfg: An instance of the configuration object
  @param ec_id: Execution context ID

  @returns: The built up nics

  """
  nics = []
  for nic in op.nics:
    nic_mode_req = nic.get(constants.INIC_MODE, None)
    nic_mode = nic_mode_req
    if nic_mode is None or nic_mode == constants.VALUE_AUTO:
      nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]

    net = nic.get(constants.INIC_NETWORK, None)
    link = nic.get(constants.NIC_LINK, None)
    ip = nic.get(constants.INIC_IP, None)
    vlan = nic.get(constants.INIC_VLAN, None)

    if net is None or net.lower() == constants.VALUE_NONE:
      net = None
    else:
      if nic_mode_req is not None or link is not None:
        raise errors.OpPrereqError("If network is given, no mode or link"
                                   " is allowed to be passed",
                                   errors.ECODE_INVAL)

    # ip validity checks
    if ip is None or ip.lower() == constants.VALUE_NONE:
      nic_ip = None
    elif ip.lower() == constants.VALUE_AUTO:
      if not op.name_check:
        raise errors.OpPrereqError("IP address set to auto but name checks"
                                   " have been skipped",
                                   errors.ECODE_INVAL)
      nic_ip = default_ip
    else:
      # We defer pool operations until later, so that the iallocator has
      # filled in the instance's node(s)
      if ip.lower() == constants.NIC_IP_POOL:
        if net is None:
          raise errors.OpPrereqError("if ip=pool, parameter network"
                                     " must be passed too",
                                     errors.ECODE_INVAL)

      elif not netutils.IPAddress.IsValid(ip):
        raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                   errors.ECODE_INVAL)

      nic_ip = ip

    # TODO: check the ip address for uniqueness
    if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
      raise errors.OpPrereqError("Routed nic mode requires an ip address",
                                 errors.ECODE_INVAL)

    # MAC address verification
    mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
    if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
      mac = utils.NormalizeAndValidateMac(mac)

      try:
        # TODO: We need to factor this out
        cfg.ReserveMAC(mac, ec_id)
      except errors.ReservationError:
        raise errors.OpPrereqError("MAC address %s already in use"
                                   " in cluster" % mac,
                                   errors.ECODE_NOTUNIQUE)

    # Build nic parameters
    nicparams = {}
    if nic_mode_req:
      nicparams[constants.NIC_MODE] = nic_mode
    if link:
      nicparams[constants.NIC_LINK] = link
    if vlan:
      nicparams[constants.NIC_VLAN] = vlan

    check_params = cluster.SimpleFillNIC(nicparams)
    objects.NIC.CheckParameterSyntax(check_params)
    net_uuid = cfg.LookupNetwork(net)
    name = nic.get(constants.INIC_NAME, None)
    if name is not None and name.lower() == constants.VALUE_NONE:
      name = None
    nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name,
                          network=net_uuid, nicparams=nicparams)
    nic_obj.uuid = cfg.GenerateUniqueID(ec_id)
    nics.append(nic_obj)

  return nics
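
# Illustrative sketch (hypothetical values): an opcode NIC specification such
# as {"mode": "bridged", "link": "br0", "mac": "auto"} is turned into an
# objects.NIC with a fresh UUID, nicparams holding the given mode and link,
# and mac left as "auto" (to be generated later in CheckPrereq), while
# {"network": "mynet", "ip": "pool"} defers IP assignment to the network's
# address pool until the primary node is known.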


def _CheckForConflictingIp(lu, ip, node_uuid):
  """In case of conflicting IP address raise error.

  @type ip: string
  @param ip: IP address
  @type node_uuid: string
  @param node_uuid: node UUID

  """
  (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node_uuid)
  if conf_net is not None:
    raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
                                " network %s, but the target NIC does not." %
                                (ip, conf_net)),
                               errors.ECODE_STATE)

  return (None, None)


def _ComputeIPolicyInstanceSpecViolation(
  ipolicy, instance_spec, disk_template,
  _compute_fn=ComputeIPolicySpecViolation):
  """Compute if instance specs meet the specs of ipolicy.

  @type ipolicy: dict
  @param ipolicy: The ipolicy to verify against
  @type instance_spec: dict
  @param instance_spec: The instance spec to verify
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{ComputeIPolicySpecViolation}

  """
  mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
  cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
  disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
  disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
  nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
  spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)

  return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
                     disk_sizes, spindle_use, disk_template)
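
# Illustrative sketch (hypothetical values): a spec dict like
#   {constants.ISPEC_MEM_SIZE: 512, constants.ISPEC_CPU_COUNT: 1,
#    constants.ISPEC_DISK_COUNT: 1, constants.ISPEC_DISK_SIZE: [1024],
#    constants.ISPEC_NIC_COUNT: 1, constants.ISPEC_SPINDLE_USE: 1}
# is unpacked into the individual arguments of ComputeIPolicySpecViolation;
# the result is a (possibly empty) list of violation descriptions, which
# CheckPrereq below joins into an error message when the policy is violated.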


def _CheckOSVariant(os_obj, name):
  """Check whether an OS name conforms to the os variants specification.

  @type os_obj: L{objects.OS}
  @param os_obj: OS object to check
  @type name: string
  @param name: OS name passed by the user, to check for validity

  """
  variant = objects.OS.GetVariant(name)
  if not os_obj.supported_variants:
    if variant:
      raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
                                 " passed)" % (os_obj.name, variant),
                                 errors.ECODE_INVAL)
    return
  if not variant:
    raise errors.OpPrereqError("OS name must include a variant",
                               errors.ECODE_INVAL)

  if variant not in os_obj.supported_variants:
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)
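
# Illustrative sketch (hypothetical names): for an OS that declares
# supported_variants=["default", "minimal"], a user-supplied name such as
# "debian+minimal" passes (the variant is the part after the "+"), plain
# "debian" is rejected with "OS name must include a variant", and
# "debian+foo" is rejected as an unsupported variant.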


class LUInstanceCreate(LogicalUnit):
  """Create an instance.

  """
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def _CheckDiskTemplateValid(self):
    """Checks validity of disk template.

    """
    cluster = self.cfg.GetClusterInfo()
    if self.op.disk_template is None:
      # FIXME: It would be better to take the default disk template from the
      # ipolicy, but for the ipolicy we need the primary node, which we get from
      # the iallocator, which wants the disk template as input. To solve this
      # chicken-and-egg problem, it should be possible to specify just a node
      # group from the iallocator and take the ipolicy from that.
      self.op.disk_template = cluster.enabled_disk_templates[0]
    CheckDiskTemplateEnabled(cluster, self.op.disk_template)

  def _CheckDiskArguments(self):
    """Checks validity of disk-related arguments.

    """
    # check that disks' names are unique and valid
    utils.ValidateDeviceNames("disk", self.op.disks)

    self._CheckDiskTemplateValid()

    # check disks' parameter names and consistent adopt/no-adopt strategy
    has_adopt = has_no_adopt = False
    for disk in self.op.disks:
      if self.op.disk_template != constants.DT_EXT:
        utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
      if constants.IDISK_ADOPT in disk:
        has_adopt = True
      else:
        has_no_adopt = True
    if has_adopt and has_no_adopt:
      raise errors.OpPrereqError("Either all disks are adopted or none is",
                                 errors.ECODE_INVAL)
    if has_adopt:
      if self.op.disk_template not in constants.DTS_MAY_ADOPT:
        raise errors.OpPrereqError("Disk adoption is not supported for the"
                                   " '%s' disk template" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)
      if self.op.iallocator is not None:
        raise errors.OpPrereqError("Disk adoption not allowed with an"
                                   " iallocator script", errors.ECODE_INVAL)
      if self.op.mode == constants.INSTANCE_IMPORT:
        raise errors.OpPrereqError("Disk adoption not allowed for"
                                   " instance import", errors.ECODE_INVAL)
    else:
      if self.op.disk_template in constants.DTS_MUST_ADOPT:
        raise errors.OpPrereqError("Disk template %s requires disk adoption,"
                                   " but no 'adopt' parameter given" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)

    self.adopt_disks = has_adopt

  def _CheckVLANArguments(self):
    """Check validity of VLANs if given.

    """
    for nic in self.op.nics:
      vlan = nic.get(constants.INIC_VLAN, None)
      if vlan:
        if vlan[0] == ".":
          # vlan starting with dot means single untagged vlan,
          # might be followed by trunk (:)
          if not vlan[1:].isdigit():
            vlanlist = vlan[1:].split(':')
            for vl in vlanlist:
              if not vl.isdigit():
                raise errors.OpPrereqError("Specified VLAN parameter is "
                                           "invalid : %s" % vlan,
                                           errors.ECODE_INVAL)
        elif vlan[0] == ":":
          # Trunk - tagged only
          vlanlist = vlan[1:].split(':')
          for vl in vlanlist:
            if not vl.isdigit():
              raise errors.OpPrereqError("Specified VLAN parameter is invalid"
                                         " : %s" % vlan, errors.ECODE_INVAL)
        elif vlan.isdigit():
          # This is the simplest case. No dots, only single digit
          # -> Create untagged access port, dot needs to be added
          nic[constants.INIC_VLAN] = "." + vlan
        else:
          raise errors.OpPrereqError("Specified VLAN parameter is invalid"
                                     " : %s" % vlan, errors.ECODE_INVAL)
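
  # Illustrative sketch (hypothetical values) of the normalization above:
  #   "1"        -> stored as ".1"   (untagged access port)
  #   ".100"     -> kept as-is       (untagged VLAN 100)
  #   ".100:200" -> kept as-is       (untagged 100 plus tagged trunk 200)
  #   ":100:200" -> kept as-is       (tagged-only trunk)
  #   "abc"      -> OpPrereqError(ECODE_INVAL)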

    
  def CheckArguments(self):
    """Check arguments.

    """
    # do not require name_check to ease forward/backward compatibility
    # for tools
    if self.op.no_install and self.op.start:
      self.LogInfo("No-installation mode selected, disabling startup")
      self.op.start = False
    # validate/normalize the instance name
    self.op.instance_name = \
      netutils.Hostname.GetNormalizedName(self.op.instance_name)

    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("Cannot do IP address check without a name"
                                 " check", errors.ECODE_INVAL)

    # check nics' parameter names
    for nic in self.op.nics:
      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
    # check that NICs' parameter names are unique and valid
    utils.ValidateDeviceNames("NIC", self.op.nics)

    self._CheckVLANArguments()

    self._CheckDiskArguments()
    assert self.op.disk_template is not None

    # instance name verification
    if self.op.name_check:
      self.hostname = _CheckHostnameSane(self, self.op.instance_name)
      self.op.instance_name = self.hostname.name
      # used in CheckPrereq for ip ping check
      self.check_ip = self.hostname.ip
    else:
      self.check_ip = None

    # file storage checks
    if (self.op.file_driver and
        not self.op.file_driver in constants.FILE_DRIVER):
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                 self.op.file_driver, errors.ECODE_INVAL)

    # set default file_driver if unset and required
    if (not self.op.file_driver and
        self.op.disk_template in constants.DTS_FILEBASED):
      self.op.file_driver = constants.FD_LOOP

    ### Node/iallocator related checks
    CheckIAllocatorOrNode(self, "iallocator", "pnode")

    if self.op.pnode is not None:
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        if self.op.snode is None:
          raise errors.OpPrereqError("The networked disk templates need"
                                     " a mirror node", errors.ECODE_INVAL)
      elif self.op.snode:
        self.LogWarning("Secondary node will be ignored on non-mirrored disk"
                        " template")
        self.op.snode = None

    _CheckOpportunisticLocking(self.op)

    if self.op.mode == constants.INSTANCE_IMPORT:
      # On import force_variant must be True, because if we forced it at
      # initial install, our only chance when importing it back is that it
      # works again!
      self.op.force_variant = True

      if self.op.no_install:
        self.LogInfo("No-installation mode has no effect during import")

    elif self.op.mode == constants.INSTANCE_CREATE:
      if self.op.os_type is None:
        raise errors.OpPrereqError("No guest OS specified",
                                   errors.ECODE_INVAL)
      if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
        raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
                                   " installation" % self.op.os_type,
                                   errors.ECODE_STATE)
    elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
      self._cds = GetClusterDomainSecret()

      # Check handshake to ensure both clusters have the same domain secret
      src_handshake = self.op.source_handshake
      if not src_handshake:
        raise errors.OpPrereqError("Missing source handshake",
                                   errors.ECODE_INVAL)

      errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
                                                           src_handshake)
      if errmsg:
        raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
                                   errors.ECODE_INVAL)

      # Load and check source CA
      self.source_x509_ca_pem = self.op.source_x509_ca
      if not self.source_x509_ca_pem:
        raise errors.OpPrereqError("Missing source X509 CA",
                                   errors.ECODE_INVAL)

      try:
        (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
                                                    self._cds)
      except OpenSSL.crypto.Error, err:
        raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
                                   (err, ), errors.ECODE_INVAL)

      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
      if errcode is not None:
        raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
                                   errors.ECODE_INVAL)

      self.source_x509_ca = cert

      src_instance_name = self.op.source_instance_name
      if not src_instance_name:
        raise errors.OpPrereqError("Missing source instance name",
                                   errors.ECODE_INVAL)

      self.source_instance_name = \
        netutils.GetHostname(name=src_instance_name).name

    else:
      raise errors.OpPrereqError("Invalid instance creation mode %r" %
                                 self.op.mode, errors.ECODE_INVAL)

  def ExpandNames(self):
    """ExpandNames for CreateInstance.

    Figure out the right locks for instance creation.

    """
    self.needed_locks = {}

    # this is just a preventive check, but someone might still add this
    # instance in the meantime, and creation will fail at lock-add time
    if self.op.instance_name in \
      [inst.name for inst in self.cfg.GetAllInstancesInfo().values()]:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 self.op.instance_name, errors.ECODE_EXISTS)

    self.add_locks[locking.LEVEL_INSTANCE] = self.op.instance_name

    if self.op.iallocator:
      # TODO: Find a solution to not lock all nodes in the cluster, e.g. by
      # specifying a group on instance creation and then selecting nodes from
      # that group
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
      self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET

      if self.op.opportunistic_locking:
        self.opportunistic_locks[locking.LEVEL_NODE] = True
        self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
    else:
      (self.op.pnode_uuid, self.op.pnode) = \
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
      nodelist = [self.op.pnode_uuid]
      if self.op.snode is not None:
        (self.op.snode_uuid, self.op.snode) = \
          ExpandNodeUuidAndName(self.cfg, self.op.snode_uuid, self.op.snode)
        nodelist.append(self.op.snode_uuid)
      self.needed_locks[locking.LEVEL_NODE] = nodelist

    # in case of import lock the source node too
    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = self.op.src_node
      src_path = self.op.src_path

      if src_path is None:
        self.op.src_path = src_path = self.op.instance_name

      if src_node is None:
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
        self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
        self.op.src_node = None
        if os.path.isabs(src_path):
          raise errors.OpPrereqError("Importing an instance from a path"
                                     " requires a source node option",
                                     errors.ECODE_INVAL)
      else:
        (self.op.src_node_uuid, self.op.src_node) = (_, src_node) = \
          ExpandNodeUuidAndName(self.cfg, self.op.src_node_uuid, src_node)
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
          self.needed_locks[locking.LEVEL_NODE].append(self.op.src_node_uuid)
        if not os.path.isabs(src_path):
          self.op.src_path = \
            utils.PathJoin(pathutils.EXPORT_DIR, src_path)

    self.needed_locks[locking.LEVEL_NODE_RES] = \
      CopyLockList(self.needed_locks[locking.LEVEL_NODE])
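
  # Illustrative sketch (hypothetical): with an iallocator the method above
  # ends up with
  #   needed_locks = {LEVEL_NODE: ALL_SET, LEVEL_NODE_ALLOC: ALL_SET,
  #                   LEVEL_NODE_RES: ALL_SET}
  # whereas with an explicit pnode/snode it only lists those node UUIDs; in
  # both cases the resource locks mirror whatever node locks were requested.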

  def _RunAllocator(self):
    """Run the allocator based on input opcode.

    """
    if self.op.opportunistic_locking:
      # Only consider nodes for which a lock is held
      node_name_whitelist = self.cfg.GetNodeNames(
        self.owned_locks(locking.LEVEL_NODE))
    else:
      node_name_whitelist = None

    req = _CreateInstanceAllocRequest(self.op, self.disks,
                                      self.nics, self.be_full,
                                      node_name_whitelist)
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      # When opportunistic locks are used only a temporary failure is generated
      if self.op.opportunistic_locking:
        ecode = errors.ECODE_TEMP_NORES
      else:
        ecode = errors.ECODE_NORES

      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" %
                                 (self.op.iallocator, ial.info),
                                 ecode)

    (self.op.pnode_uuid, self.op.pnode) = \
      ExpandNodeUuidAndName(self.cfg, None, ial.result[0])
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                 self.op.instance_name, self.op.iallocator,
                 utils.CommaJoin(ial.result))

    assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator"

    if req.RequiredNodes() == 2:
      (self.op.snode_uuid, self.op.snode) = \
        ExpandNodeUuidAndName(self.cfg, None, ial.result[1])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "ADD_MODE": self.op.mode,
      }
    if self.op.mode == constants.INSTANCE_IMPORT:
      env["SRC_NODE"] = self.op.src_node
      env["SRC_PATH"] = self.op.src_path
      env["SRC_IMAGES"] = self.src_images

    env.update(BuildInstanceHookEnv(
      name=self.op.instance_name,
      primary_node_name=self.op.pnode,
      secondary_node_names=self.cfg.GetNodeNames(self.secondaries),
      status=self.op.start,
      os_type=self.op.os_type,
      minmem=self.be_full[constants.BE_MINMEM],
      maxmem=self.be_full[constants.BE_MAXMEM],
      vcpus=self.be_full[constants.BE_VCPUS],
      nics=NICListToTuple(self, self.nics),
      disk_template=self.op.disk_template,
      disks=[(d[constants.IDISK_NAME], d.get("uuid", ""),
              d[constants.IDISK_SIZE], d[constants.IDISK_MODE])
             for d in self.disks],
      bep=self.be_full,
      hvp=self.hv_full,
      hypervisor_name=self.op.hypervisor,
      tags=self.op.tags,
      ))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode(), self.op.pnode_uuid] + self.secondaries
    return nl, nl

  def _ReadExportInfo(self):
    """Reads the export information from disk.

    It will override the opcode source node and path with the actual
    information, if these two were not specified before.

    @return: the export information

    """
    assert self.op.mode == constants.INSTANCE_IMPORT

    if self.op.src_node_uuid is None:
      locked_nodes = self.owned_locks(locking.LEVEL_NODE)
      exp_list = self.rpc.call_export_list(locked_nodes)
      found = False
      for node_uuid in exp_list:
        if exp_list[node_uuid].fail_msg:
          continue
        if self.op.src_path in exp_list[node_uuid].payload:
          found = True
          self.op.src_node = self.cfg.GetNodeInfo(node_uuid).name
          self.op.src_node_uuid = node_uuid
          self.op.src_path = utils.PathJoin(pathutils.EXPORT_DIR,
                                            self.op.src_path)
          break
      if not found:
        raise errors.OpPrereqError("No export found for relative path %s" %
                                   self.op.src_path, errors.ECODE_INVAL)

    CheckNodeOnline(self, self.op.src_node_uuid)
    result = self.rpc.call_export_info(self.op.src_node_uuid, self.op.src_path)
    result.Raise("No export or invalid export found in dir %s" %
                 self.op.src_path)

    export_info = objects.SerializableConfigParser.Loads(str(result.payload))
    if not export_info.has_section(constants.INISECT_EXP):
      raise errors.ProgrammerError("Corrupted export config",
                                   errors.ECODE_ENVIRON)

    ei_version = export_info.get(constants.INISECT_EXP, "version")
    if int(ei_version) != constants.EXPORT_VERSION:
      raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                 (ei_version, constants.EXPORT_VERSION),
                                 errors.ECODE_ENVIRON)
    return export_info

  def _ReadExportParams(self, einfo):
    """Use export parameters as defaults.

    In case the opcode doesn't specify (as in override) some instance
    parameters, then try to use them from the export information, if
    that declares them.

    """
    self.op.os_type = einfo.get(constants.INISECT_EXP, "os")

    if not self.op.disks:
      disks = []
      # TODO: import the disk iv_name too
      for idx in range(constants.MAX_DISKS):
        if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
          disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
          disks.append({constants.IDISK_SIZE: disk_sz})
      self.op.disks = disks
      if not disks and self.op.disk_template != constants.DT_DISKLESS:
        raise errors.OpPrereqError("No disk info specified and the export"
                                   " is missing the disk information",
                                   errors.ECODE_INVAL)

    if not self.op.nics:
      nics = []
      for idx in range(constants.MAX_NICS):
        if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
          ndict = {}
          for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
            nic_param_name = "nic%d_%s" % (idx, name)
            if einfo.has_option(constants.INISECT_INS, nic_param_name):
              v = einfo.get(constants.INISECT_INS, nic_param_name)
              ndict[name] = v
          nics.append(ndict)
        else:
          break
      self.op.nics = nics

    if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
      self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()

    if (self.op.hypervisor is None and
        einfo.has_option(constants.INISECT_INS, "hypervisor")):
      self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")

    if einfo.has_section(constants.INISECT_HYP):
      # use the export parameters but do not override the ones
      # specified by the user
      for name, value in einfo.items(constants.INISECT_HYP):
        if name not in self.op.hvparams:
          self.op.hvparams[name] = value

    if einfo.has_section(constants.INISECT_BEP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_BEP):
        if name not in self.op.beparams:
          self.op.beparams[name] = value
        # Compatibility for the old "memory" be param
        if name == constants.BE_MEMORY:
          if constants.BE_MAXMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MAXMEM] = value
          if constants.BE_MINMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MINMEM] = value
    else:
      # try to read the parameters old style, from the main section
      for name in constants.BES_PARAMETERS:
        if (name not in self.op.beparams and
            einfo.has_option(constants.INISECT_INS, name)):
          self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)

    if einfo.has_section(constants.INISECT_OSP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_OSP):
        if name not in self.op.osparams:
          self.op.osparams[name] = value

    
816
  def _RevertToDefaults(self, cluster):
817
    """Revert the instance parameters to the default values.
818

819
    """
820
    # hvparams
821
    hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
822
    for name in self.op.hvparams.keys():
823
      if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
824
        del self.op.hvparams[name]
825
    # beparams
826
    be_defs = cluster.SimpleFillBE({})
827
    for name in self.op.beparams.keys():
828
      if name in be_defs and be_defs[name] == self.op.beparams[name]:
829
        del self.op.beparams[name]
830
    # nic params
831
    nic_defs = cluster.SimpleFillNIC({})
832
    for nic in self.op.nics:
833
      for name in constants.NICS_PARAMETERS:
834
        if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
835
          del nic[name]
836
    # osparams
837
    os_defs = cluster.SimpleFillOS(self.op.os_type, {})
838
    for name in self.op.osparams.keys():
839
      if name in os_defs and os_defs[name] == self.op.osparams[name]:
840
        del self.op.osparams[name]

  def _CalculateFileStorageDir(self):
    """Calculate final instance file storage dir.

    """
    # file storage dir calculation/check
    self.instance_file_storage_dir = None
    if self.op.disk_template in constants.DTS_FILEBASED:
      # build the full file storage dir path
      joinargs = []

      if self.op.disk_template in (constants.DT_SHARED_FILE,
                                   constants.DT_GLUSTER):
        get_fsd_fn = self.cfg.GetSharedFileStorageDir
      else:
        get_fsd_fn = self.cfg.GetFileStorageDir

      cfg_storagedir = get_fsd_fn()
      if not cfg_storagedir:
        raise errors.OpPrereqError("Cluster file storage dir not defined",
                                   errors.ECODE_STATE)
      joinargs.append(cfg_storagedir)

      if self.op.file_storage_dir is not None:
        joinargs.append(self.op.file_storage_dir)

      joinargs.append(self.op.instance_name)

      # pylint: disable=W0142
      self.instance_file_storage_dir = utils.PathJoin(*joinargs)
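
  # Illustrative sketch (hypothetical paths): with a cluster file storage dir
  # of "/srv/ganeti/file-storage", an opcode file_storage_dir of "web" and an
  # instance named "inst1.example.com", the directory computed above would be
  #   /srv/ganeti/file-storage/web/inst1.example.com
  # (shared-file and Gluster templates start from the shared storage dir
  # instead).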

  def CheckPrereq(self): # pylint: disable=R0914
    """Check prerequisites.

    """
    self._CalculateFileStorageDir()

    if self.op.mode == constants.INSTANCE_IMPORT:
      export_info = self._ReadExportInfo()
      self._ReadExportParams(export_info)
      self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
    else:
      self._old_instance_name = None

    if (not self.cfg.GetVGName() and
        self.op.disk_template not in constants.DTS_NOT_LVM):
      raise errors.OpPrereqError("Cluster does not support lvm-based"
                                 " instances", errors.ECODE_STATE)

    if (self.op.hypervisor is None or
        self.op.hypervisor == constants.VALUE_AUTO):
      self.op.hypervisor = self.cfg.GetHypervisorType()

    cluster = self.cfg.GetClusterInfo()
    enabled_hvs = cluster.enabled_hypervisors
    if self.op.hypervisor not in enabled_hvs:
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
                                 " cluster (%s)" %
                                 (self.op.hypervisor, ",".join(enabled_hvs)),
                                 errors.ECODE_STATE)

    # Check tag validity
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)

    # check hypervisor parameter syntax (locally)
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
    filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
                                      self.op.hvparams)
    hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
    hv_type.CheckParameterSyntax(filled_hvp)
    self.hv_full = filled_hvp
    # check that we don't specify global parameters on an instance
    CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
                         "instance", "cluster")

    # fill and remember the beparams dict
    self.be_full = _ComputeFullBeParams(self.op, cluster)

    # build os parameters
    self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)

    # now that hvp/bep are in final format, let's reset to defaults,
    # if told to do so
    if self.op.identify_defaults:
      self._RevertToDefaults(cluster)

    # NIC buildup
    self.nics = _ComputeNics(self.op, cluster, self.check_ip, self.cfg,
                             self.proc.GetECId())

    # disk checks/pre-build
    default_vg = self.cfg.GetVGName()
    self.disks = ComputeDisks(self.op, default_vg)

    if self.op.mode == constants.INSTANCE_IMPORT:
      disk_images = []
      for idx in range(len(self.disks)):
        option = "disk%d_dump" % idx
        if export_info.has_option(constants.INISECT_INS, option):
          # FIXME: are the old os-es, disk sizes, etc. useful?
          export_name = export_info.get(constants.INISECT_INS, option)
          image = utils.PathJoin(self.op.src_path, export_name)
          disk_images.append(image)
        else:
          disk_images.append(False)

      self.src_images = disk_images

      if self.op.instance_name == self._old_instance_name:
        for idx, nic in enumerate(self.nics):
          if nic.mac == constants.VALUE_AUTO:
            nic_mac_ini = "nic%d_mac" % idx
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)

    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT

    # ip ping checks (we use the same ip that was resolved in ExpandNames)
    if self.op.ip_check:
      if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (self.check_ip, self.op.instance_name),
                                   errors.ECODE_NOTUNIQUE)

    #### mac address generation
    # By generating here the mac address both the allocator and the hooks get
    # the real final mac address rather than the 'auto' or 'generate' value.
    # There is a race condition between the generation and the instance object
    # creation, which means that we know the mac is valid now, but we're not
    # sure it will be when we actually add the instance. If things go bad
    # adding the instance will abort because of a duplicate mac, and the
    # creation job will fail.
    for nic in self.nics:
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId())

    #### allocator run

    if self.op.iallocator is not None:
      self._RunAllocator()

    # Release all unneeded node locks
    keep_locks = filter(None, [self.op.pnode_uuid, self.op.snode_uuid,
                               self.op.src_node_uuid])
    ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES)), \
      "Node locks differ from node resource locks"

    #### node related checks

    # check primary node
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode_uuid)
    assert self.pnode is not None, \
      "Cannot retrieve locked node %s" % self.op.pnode_uuid
    if pnode.offline:
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if pnode.drained:
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if not pnode.vm_capable:
      raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
                                 " '%s'" % pnode.name, errors.ECODE_STATE)

    self.secondaries = []

    # Fill in any IPs from IP pools. This must happen here, because we need to
    # know the nic's primary node, as specified by the iallocator
    for idx, nic in enumerate(self.nics):
      net_uuid = nic.network
      if net_uuid is not None:
        nobj = self.cfg.GetNetwork(net_uuid)
        netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.uuid)
        if netparams is None:
          raise errors.OpPrereqError("No netparams found for network"
                                     " %s. Probably not connected to"
                                     " node's %s nodegroup" %
                                     (nobj.name, self.pnode.name),
                                     errors.ECODE_INVAL)
        self.LogInfo("NIC/%d inherits netparams %s" %
                     (idx, netparams.values()))
        nic.nicparams = dict(netparams)
        if nic.ip is not None:
          if nic.ip.lower() == constants.NIC_IP_POOL:
            try:
              nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
                                         " from the address pool" % idx,
                                         errors.ECODE_STATE)
            self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name)
          else:
            try:
              self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("IP address %s already in use"
                                         " or does not belong to network %s" %
                                         (nic.ip, nobj.name),
                                         errors.ECODE_NOTUNIQUE)

      # net is None, ip None or given
      elif self.op.conflicts_check:
        _CheckForConflictingIp(self, nic.ip, self.pnode.uuid)

    # mirror node verification
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.snode_uuid == pnode.uuid:
        raise errors.OpPrereqError("The secondary node cannot be the"
                                   " primary node", errors.ECODE_INVAL)
      CheckNodeOnline(self, self.op.snode_uuid)
      CheckNodeNotDrained(self, self.op.snode_uuid)
      CheckNodeVmCapable(self, self.op.snode_uuid)
      self.secondaries.append(self.op.snode_uuid)

      snode = self.cfg.GetNodeInfo(self.op.snode_uuid)
      if pnode.group != snode.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    nodes = [pnode]
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      nodes.append(snode)
    has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
    excl_stor = compat.any(map(has_es, nodes))
    if excl_stor and not self.op.disk_template in constants.DTS_EXCL_STORAGE:
      raise errors.OpPrereqError("Disk template %s not supported with"
                                 " exclusive storage" % self.op.disk_template,
                                 errors.ECODE_STATE)
    for disk in self.disks:
      CheckSpindlesExclusiveStorage(disk, excl_stor, True)

    node_uuids = [pnode.uuid] + self.secondaries

    if not self.adopt_disks:
      if self.op.disk_template == constants.DT_RBD:
        # _CheckRADOSFreeSpace() is just a placeholder.
        # Any function that checks prerequisites can be placed here.
        # Check if there is enough space on the RADOS cluster.
        CheckRADOSFreeSpace()
      elif self.op.disk_template == constants.DT_EXT:
        # FIXME: Function that checks prereqs if needed
        pass
      elif self.op.disk_template in constants.DTS_LVM:
        # Check lv size requirements, if not adopting
        req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
        CheckNodesFreeDiskPerVG(self, node_uuids, req_sizes)
      else:
        # FIXME: add checks for other, non-adopting, non-lvm disk templates
        pass

    elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
      all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
                                disk[constants.IDISK_ADOPT])
                     for disk in self.disks])
      if len(all_lvs) != len(self.disks):
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
                                   errors.ECODE_INVAL)
      for lv_name in all_lvs:
        try:
          # FIXME: lv_name here is "vg/lv" need to ensure that other calls
          # to ReserveLV uses the same syntax
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("LV named %s used by another instance" %
                                     lv_name, errors.ECODE_NOTUNIQUE)

      vg_names = self.rpc.call_vg_list([pnode.uuid])[pnode.uuid]
      vg_names.Raise("Cannot get VG information from node %s" % pnode.name)

      node_lvs = self.rpc.call_lv_list([pnode.uuid],
                                       vg_names.payload.keys())[pnode.uuid]
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
      node_lvs = node_lvs.payload

      delta = all_lvs.difference(node_lvs.keys())
      if delta:
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
      if online_lvs:
        raise errors.OpPrereqError("Online logical volumes found, cannot"
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
                                   errors.ECODE_STATE)
      # update the size of disk based on what is found
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
                                        dsk[constants.IDISK_ADOPT])][0]))

    elif self.op.disk_template == constants.DT_BLOCK:
      # Normalize and de-duplicate device paths
      all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
                       for disk in self.disks])
      if len(all_disks) != len(self.disks):
        raise errors.OpPrereqError("Duplicate disk names given for adoption",
                                   errors.ECODE_INVAL)
      baddisks = [d for d in all_disks
                  if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
      if baddisks:
        raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
                                   " cannot be adopted" %
                                   (utils.CommaJoin(baddisks),
                                    constants.ADOPTABLE_BLOCKDEV_ROOT),
                                   errors.ECODE_INVAL)

      node_disks = self.rpc.call_bdev_sizes([pnode.uuid],
                                            list(all_disks))[pnode.uuid]
      node_disks.Raise("Cannot get block device information from node %s" %
                       pnode.name)
      node_disks = node_disks.payload
      delta = all_disks.difference(node_disks.keys())
      if delta:
        raise errors.OpPrereqError("Missing block device(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_disks[dsk[constants.IDISK_ADOPT]]))

    # Check disk access param to be compatible with specified hypervisor
    node_info = self.cfg.GetNodeInfo(self.op.pnode_uuid)
    node_group = self.cfg.GetNodeGroup(node_info.group)
    disk_params = self.cfg.GetGroupDiskParams(node_group)
    access_type = disk_params[self.op.disk_template].get(
      constants.RBD_ACCESS, constants.DISK_KERNELSPACE
    )

    if not IsValidDiskAccessModeCombination(self.op.hypervisor,
                                            self.op.disk_template,
                                            access_type):
      raise errors.OpPrereqError("Selected hypervisor (%s) cannot be"
                                 " used with %s disk access param" %
                                 (self.op.hypervisor, access_type),
                                 errors.ECODE_STATE)

    # Verify instance specs
    spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
    ispec = {
      constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
      constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
      constants.ISPEC_DISK_COUNT: len(self.disks),
      constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE]
                                  for disk in self.disks],
      constants.ISPEC_NIC_COUNT: len(self.nics),
      constants.ISPEC_SPINDLE_USE: spindle_use,
      }

    group_info = self.cfg.GetNodeGroup(pnode.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
    res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
                                               self.op.disk_template)
    if not self.op.ignore_ipolicy and res:
      msg = ("Instance allocation to group %s (%s) violates policy: %s" %
             (pnode.group, group_info.name, utils.CommaJoin(res)))
      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

    CheckHVParams(self, node_uuids, self.op.hypervisor, self.op.hvparams)

    CheckNodeHasOS(self, pnode.uuid, self.op.os_type, self.op.force_variant)
    # check OS parameters (remotely)
    CheckOSParams(self, True, node_uuids, self.op.os_type, self.os_full)

    CheckNicsBridgesExist(self, self.nics, self.pnode.uuid)

    #TODO: _CheckExtParams (remotely)
    # Check parameters for extstorage

    # memory check on primary node
    #TODO(dynmem): use MINMEM for checking
    if self.op.start:
      hvfull = objects.FillDict(cluster.hvparams.get(self.op.hypervisor, {}),
                                self.op.hvparams)
      CheckNodeFreeMemory(self, self.pnode.uuid,
                          "creating instance %s" % self.op.instance_name,
                          self.be_full[constants.BE_MAXMEM],
                          self.op.hypervisor, hvfull)

    self.dry_run_result = list(node_uuids)

  def Exec(self, feedback_fn):
    """Create and add the instance to the cluster.

    """
    assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
                self.owned_locks(locking.LEVEL_NODE)), \
      "Node locks differ from node resource locks"
    assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)

    ht_kind = self.op.hypervisor
    if ht_kind in constants.HTS_REQ_PORT:
      network_port = self.cfg.AllocatePort()
    else:
      network_port = None

    instance_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())

    # This is ugly but we got a chicken-egg problem here
    # We can only take the group disk parameters, as the instance
    # has no disks yet (we are generating them right here).
    nodegroup = self.cfg.GetNodeGroup(self.pnode.group)
    disks = GenerateDiskTemplate(self,
                                 self.op.disk_template,
                                 instance_uuid, self.pnode.uuid,
                                 self.secondaries,
                                 self.disks,
                                 self.instance_file_storage_dir,
                                 self.op.file_driver,
                                 0,
                                 feedback_fn,
                                 self.cfg.GetGroupDiskParams(nodegroup))

    iobj = objects.Instance(name=self.op.instance_name,
                            uuid=instance_uuid,
                            os=self.op.os_type,
                            primary_node=self.pnode.uuid,
                            nics=self.nics, disks=disks,
                            disk_template=self.op.disk_template,
                            disks_active=False,
                            admin_state=constants.ADMINST_DOWN,
                            network_port=network_port,
                            beparams=self.op.beparams,
                            hvparams=self.op.hvparams,
                            hypervisor=self.op.hypervisor,
                            osparams=self.op.osparams,
                            )

    if self.op.tags:
      for tag in self.op.tags:
        iobj.AddTag(tag)

    if self.adopt_disks:
      if self.op.disk_template == constants.DT_PLAIN:
        # rename LVs to the newly-generated names; we need to construct
        # 'fake' LV disks with the old data, plus the new unique_id
        tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
        rename_to = []
        for t_dsk, a_dsk in zip(tmp_disks, self.disks):
          rename_to.append(t_dsk.logical_id)
          t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
        result = self.rpc.call_blockdev_rename(self.pnode.uuid,
                                               zip(tmp_disks, rename_to))
        result.Raise("Failed to rename adopted LVs")
    else:
      feedback_fn("* creating instance disks...")
      try:
        CreateDisks(self, iobj)
      except errors.OpExecError:
        self.LogWarning("Device creation failed")
        self.cfg.ReleaseDRBDMinors(self.op.instance_name)
        raise

    feedback_fn("adding instance %s to cluster config" % self.op.instance_name)

    self.cfg.AddInstance(iobj, self.proc.GetECId())

    # Declare that we don't want to remove the instance lock anymore, as we've
    # added the instance to the config
    del self.remove_locks[locking.LEVEL_INSTANCE]

    if self.op.mode == constants.INSTANCE_IMPORT:
      # Release unused nodes
      ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node_uuid])
    else:
      # Release all nodes
      ReleaseLocks(self, locking.LEVEL_NODE)

    disk_abort = False
    if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
      feedback_fn("* wiping instance disks...")
      try:
        WipeDisks(self, iobj)
      except errors.OpExecError, err:
        logging.exception("Wiping disks failed")
        self.LogWarning("Wiping instance disks failed (%s)", err)
        disk_abort = True

    if disk_abort:
      # Something is already wrong with the disks, don't do anything else
      pass
    elif self.op.wait_for_sync:
      disk_abort = not WaitForSync(self, iobj)
    elif iobj.disk_template in constants.DTS_INT_MIRROR:
      # make sure the disks are not degraded (still sync-ing is ok)
      feedback_fn("* checking mirrors status")
      disk_abort = not WaitForSync(self, iobj, oneshot=True)
    else:
      disk_abort = False

    if disk_abort:
      RemoveDisks(self, iobj)
      self.cfg.RemoveInstance(iobj.uuid)
      # Make sure the instance lock gets removed
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance")

    # instance disks are now active
    iobj.disks_active = True

    # Release all node resource locks
    ReleaseLocks(self, locking.LEVEL_NODE_RES)

    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
      if self.op.mode == constants.INSTANCE_CREATE:
        if not self.op.no_install:
          pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
                        not self.op.wait_for_sync)
          if pause_sync:
            feedback_fn("* pausing disk sync to install instance OS")
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
                                                              (iobj.disks,
                                                               iobj), True)
            for idx, success in enumerate(result.payload):
              if not success:
                logging.warn("pause-sync of instance %s for disk %d failed",
                             self.op.instance_name, idx)

          feedback_fn("* running the instance OS create scripts...")
          # FIXME: pass debug option from opcode to backend
          os_add_result = \
            self.rpc.call_instance_os_add(self.pnode.uuid, (iobj, None), False,
                                          self.op.debug_level)
          if pause_sync:
            feedback_fn("* resuming disk sync")
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
                                                              (iobj.disks,
                                                               iobj), False)
            for idx, success in enumerate(result.payload):
              if not success:
                logging.warn("resume-sync of instance %s for disk %d failed",
                             self.op.instance_name, idx)

          os_add_result.Raise("Could not add os for instance %s"
                              " on node %s" % (self.op.instance_name,
                                               self.pnode.name))

      else:
        if self.op.mode == constants.INSTANCE_IMPORT:
          feedback_fn("* running the instance OS import scripts...")

          transfers = []

          for idx, image in enumerate(self.src_images):
            if not image:
              continue

            # FIXME: pass debug option from opcode to backend
            dt = masterd.instance.DiskTransfer("disk/%s" % idx,
                                               constants.IEIO_FILE, (image, ),
                                               constants.IEIO_SCRIPT,
                                               ((iobj.disks[idx], iobj), idx),
                                               None)
            transfers.append(dt)

          import_result = \
            masterd.instance.TransferInstanceData(self, feedback_fn,
                                                  self.op.src_node_uuid,
                                                  self.pnode.uuid,
                                                  self.pnode.secondary_ip,
                                                  self.op.compress,
                                                  iobj, transfers)
          if not compat.all(import_result):
            self.LogWarning("Some disks for instance %s on node %s were not"
                            " imported successfully" % (self.op.instance_name,
                                                        self.pnode.name))

          rename_from = self._old_instance_name

        elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
          feedback_fn("* preparing remote import...")
          # The source cluster will stop the instance before attempting to make
          # a connection. In some cases stopping an instance can take a long
          # time, hence the shutdown timeout is added to the connection
          # timeout.
          connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
                             self.op.source_shutdown_timeout)
          timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)

          assert iobj.primary_node == self.pnode.uuid
          disk_results = \
            masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
                                          self.source_x509_ca,
                                          self._cds, self.op.compress, timeouts)
          if not compat.all(disk_results):
            # TODO: Should the instance still be started, even if some disks
            # failed to import (valid for local imports, too)?
            self.LogWarning("Some disks for instance %s on node %s were not"
                            " imported successfully" % (self.op.instance_name,
                                                        self.pnode.name))

          rename_from = self.source_instance_name

        else:
          # also checked in the prereq part
          raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
                                       % self.op.mode)

        # Run rename script on newly imported instance
        assert iobj.name == self.op.instance_name
        feedback_fn("Running rename script for %s" % self.op.instance_name)
        result = self.rpc.call_instance_run_rename(self.pnode.uuid, iobj,
                                                   rename_from,
                                                   self.op.debug_level)
        result.Warn("Failed to run rename script for %s on node %s" %
                    (self.op.instance_name, self.pnode.name), self.LogWarning)

    assert not self.owned_locks(locking.LEVEL_NODE_RES)

    if self.op.start:
      iobj.admin_state = constants.ADMINST_UP
      self.cfg.Update(iobj, feedback_fn)
      logging.info("Starting instance %s on node %s", self.op.instance_name,
                   self.pnode.name)
      feedback_fn("* starting instance...")
      result = self.rpc.call_instance_start(self.pnode.uuid, (iobj, None, None),
                                            False, self.op.reason)
      result.Raise("Could not start instance")

    return list(iobj.all_nodes)


class LUInstanceRename(LogicalUnit):
  """Rename an instance.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE

  def CheckArguments(self):
    """Check arguments.

    """
    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("IP address check requires a name check",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = BuildInstanceHookEnvByObject(self, self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    (self.op.instance_uuid, self.op.instance_name) = \
      ExpandInstanceUuidAndName(self.cfg, self.op.instance_uuid,
                                self.op.instance_name)
    instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    assert instance is not None

    # It should actually not happen that an instance is running with a disabled
    # disk template, but in case it does, the renaming of file-based instances
    # will fail horribly. Thus, we test it before.
    if (instance.disk_template in constants.DTS_FILEBASED and
        self.op.new_name != instance.name):
      CheckDiskTemplateEnabled(self.cfg.GetClusterInfo(),
                               instance.disk_template)

    CheckNodeOnline(self, instance.primary_node)
    CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
                       msg="cannot rename")
    self.instance = instance

    new_name = self.op.new_name
    if self.op.name_check:
      hostname = _CheckHostnameSane(self, new_name)
      new_name = self.op.new_name = hostname.name
      if (self.op.ip_check and
          netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (hostname.ip, new_name),
                                   errors.ECODE_NOTUNIQUE)

    instance_names = [inst.name for
                      inst in self.cfg.GetAllInstancesInfo().values()]
    if new_name in instance_names and new_name != instance.name:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 new_name, errors.ECODE_EXISTS)

  def Exec(self, feedback_fn):
    """Rename the instance.

    """
    old_name = self.instance.name

    rename_file_storage = False
    if (self.instance.disk_template in constants.DTS_FILEBASED and
        self.op.new_name != self.instance.name):
      old_file_storage_dir = os.path.dirname(
                               self.instance.disks[0].logical_id[1])
      rename_file_storage = True

    self.cfg.RenameInstance(self.instance.uuid, self.op.new_name)
    # Change the instance lock. This is definitely safe while we hold the BGL.
    # Otherwise the new lock would have to be added in acquired mode.
    assert self.REQ_BGL
    assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER)
    self.glm.remove(locking.LEVEL_INSTANCE, old_name)
    self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)

    # re-read the instance from the configuration after rename
    renamed_inst = self.cfg.GetInstanceInfo(self.instance.uuid)

    if rename_file_storage:
      new_file_storage_dir = os.path.dirname(
                               renamed_inst.disks[0].logical_id[1])
      result = self.rpc.call_file_storage_dir_rename(renamed_inst.primary_node,
                                                     old_file_storage_dir,
                                                     new_file_storage_dir)
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
                   " (but the instance has been renamed in Ganeti)" %
                   (self.cfg.GetNodeName(renamed_inst.primary_node),
                    old_file_storage_dir, new_file_storage_dir))

    StartInstanceDisks(self, renamed_inst, None)
    # update info on disks
    info = GetInstanceInfoText(renamed_inst)
    for (idx, disk) in enumerate(renamed_inst.disks):
      for node_uuid in renamed_inst.all_nodes:
        result = self.rpc.call_blockdev_setinfo(node_uuid,
                                                (disk, renamed_inst), info)
        result.Warn("Error setting info on node %s for disk %s" %
                    (self.cfg.GetNodeName(node_uuid), idx), self.LogWarning)
    try:
      result = self.rpc.call_instance_run_rename(renamed_inst.primary_node,
                                                 renamed_inst, old_name,
                                                 self.op.debug_level)
      result.Warn("Could not run OS rename script for instance %s on node %s"
                  " (but the instance has been renamed in Ganeti)" %
                  (renamed_inst.name,
                   self.cfg.GetNodeName(renamed_inst.primary_node)),
                  self.LogWarning)
    finally:
      ShutdownInstanceDisks(self, renamed_inst)

    return renamed_inst.name


class LUInstanceRemove(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()
    elif level == locking.LEVEL_NODE_RES:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = BuildInstanceHookEnvByObject(self, self.instance)
    env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()]
    nl_post = list(self.instance.all_nodes) + nl
    return (nl, nl_post)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Remove the instance.

    """
    logging.info("Shutting down instance %s on node %s", self.instance.name,
                 self.cfg.GetNodeName(self.instance.primary_node))

    result = self.rpc.call_instance_shutdown(self.instance.primary_node,
                                             self.instance,
                                             self.op.shutdown_timeout,
                                             self.op.reason)
    if self.op.ignore_failures:
      result.Warn("Warning: can't shutdown instance", feedback_fn)
    else:
      result.Raise("Could not shutdown instance %s on node %s" %
                   (self.instance.name,
                    self.cfg.GetNodeName(self.instance.primary_node)))

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES))
    assert not (set(self.instance.all_nodes) -
                self.owned_locks(locking.LEVEL_NODE)), \
      "Not owning correct locks"

    RemoveInstance(self, feedback_fn, self.instance, self.op.ignore_failures)


class LUInstanceMove(LogicalUnit):
  """Move an instance by data-copying.

  """
  HPATH = "instance-move"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    (self.op.target_node_uuid, self.op.target_node) = \
      ExpandNodeUuidAndName(self.cfg, self.op.target_node_uuid,
                            self.op.target_node)
    self.needed_locks[locking.LEVEL_NODE] = [self.op.target_node_uuid]
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes(primary_only=True)
    elif level == locking.LEVEL_NODE_RES:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and target nodes of the instance.

    """
    env = {
      "TARGET_NODE": self.op.target_node,
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
      }
    env.update(BuildInstanceHookEnvByObject(self, self.instance))
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [
      self.cfg.GetMasterNode(),
      self.instance.primary_node,
      self.op.target_node_uuid,
      ]
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    if self.instance.disk_template not in constants.DTS_COPYABLE:
      raise errors.OpPrereqError("Disk template %s not suitable for copying" %
                                 self.instance.disk_template,
                                 errors.ECODE_STATE)

    target_node = self.cfg.GetNodeInfo(self.op.target_node_uuid)
    assert target_node is not None, \
      "Cannot retrieve locked node %s" % self.op.target_node

    self.target_node_uuid = target_node.uuid
    if target_node.uuid == self.instance.primary_node:
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
                                 (self.instance.name, target_node.name),
                                 errors.ECODE_STATE)

    cluster = self.cfg.GetClusterInfo()
    bep = cluster.FillBE(self.instance)

    for idx, dsk in enumerate(self.instance.disks):
      if dsk.dev_type not in (constants.DT_PLAIN, constants.DT_FILE,
                              constants.DT_SHARED_FILE, constants.DT_GLUSTER):
        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
                                   " cannot copy" % idx, errors.ECODE_STATE)

    CheckNodeOnline(self, target_node.uuid)
    CheckNodeNotDrained(self, target_node.uuid)
    CheckNodeVmCapable(self, target_node.uuid)
    group_info = self.cfg.GetNodeGroup(target_node.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
    CheckTargetNodeIPolicy(self, ipolicy, self.instance, target_node, self.cfg,
                           ignore=self.op.ignore_ipolicy)

    if self.instance.admin_state == constants.ADMINST_UP:
      # check memory requirements on the target node
      CheckNodeFreeMemory(
          self, target_node.uuid, "failing over instance %s" %
          self.instance.name, bep[constants.BE_MAXMEM],
          self.instance.hypervisor,
          cluster.hvparams[self.instance.hypervisor])
    else:
      self.LogInfo("Not checking memory on the secondary node as"
                   " instance will not be started")

    # check bridge existence
    CheckInstanceBridgesExist(self, self.instance, node_uuid=target_node.uuid)

  def Exec(self, feedback_fn):
    """Move an instance.

    The move is done by shutting it down on its present node, copying
    the data over (slow) and starting it on the new node.

    """
    source_node = self.cfg.GetNodeInfo(self.instance.primary_node)
    target_node = self.cfg.GetNodeInfo(self.target_node_uuid)

    self.LogInfo("Shutting down instance %s on source node %s",
                 self.instance.name, source_node.name)

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES))

    result = self.rpc.call_instance_shutdown(source_node.uuid, self.instance,
                                             self.op.shutdown_timeout,
                                             self.op.reason)
    if self.op.ignore_consistency:
      result.Warn("Could not shutdown instance %s on node %s. Proceeding"
                  " anyway. Please make sure node %s is down. Error details" %
                  (self.instance.name, source_node.name, source_node.name),
                  self.LogWarning)
    else:
      result.Raise("Could not shutdown instance %s on node %s" %
                   (self.instance.name, source_node.name))

    # create the target disks
    try:
      CreateDisks(self, self.instance, target_node_uuid=target_node.uuid)
    except errors.OpExecError:
      self.LogWarning("Device creation failed")
      self.cfg.ReleaseDRBDMinors(self.instance.uuid)
      raise

    errs = []
    transfers = []
    # activate, get path, create transfer jobs
    for idx, disk in enumerate(self.instance.disks):
      # FIXME: pass debug option from opcode to backend
      dt = masterd.instance.DiskTransfer("disk/%s" % idx,
                                         constants.IEIO_RAW_DISK,
                                         (disk, self.instance),
                                         constants.IEIO_RAW_DISK,
                                         (disk, self.instance),
                                         None)
      transfers.append(dt)

    import_result = \
      masterd.instance.TransferInstanceData(self, feedback_fn,
                                            source_node.uuid,
                                            target_node.uuid,
                                            target_node.secondary_ip,
                                            self.op.compress,
                                            self.instance, transfers)
    if not compat.all(import_result):
      errs.append("Failed to transfer instance data")

    if errs:
      self.LogWarning("Some disks failed to copy, aborting")
      try:
        RemoveDisks(self, self.instance, target_node_uuid=target_node.uuid)
      finally:
        self.cfg.ReleaseDRBDMinors(self.instance.uuid)
        raise errors.OpExecError("Errors during disk copy: %s" %
                                 (",".join(errs),))

    self.instance.primary_node = target_node.uuid
    self.cfg.Update(self.instance, feedback_fn)

    self.LogInfo("Removing the disks on the original node")
    RemoveDisks(self, self.instance, target_node_uuid=source_node.uuid)

    # Only start the instance if it's marked as up
    if self.instance.admin_state == constants.ADMINST_UP:
      self.LogInfo("Starting instance %s on node %s",
                   self.instance.name, target_node.name)

      disks_ok, _ = AssembleInstanceDisks(self, self.instance,
                                          ignore_secondaries=True)
      if not disks_ok:
        ShutdownInstanceDisks(self, self.instance)
        raise errors.OpExecError("Can't activate the instance's disks")

      result = self.rpc.call_instance_start(target_node.uuid,
                                            (self.instance, None, None), False,
                                            self.op.reason)
      msg = result.fail_msg
      if msg:
        ShutdownInstanceDisks(self, self.instance)
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
                                 (self.instance.name, target_node.name, msg))


class LUInstanceMultiAlloc(NoHooksLU):
  """Allocates multiple instances at the same time.

  """
  REQ_BGL = False

  def CheckArguments(self):
    """Check arguments.

    """
    nodes = []
    for inst in self.op.instances:
      if inst.iallocator is not None:
        raise errors.OpPrereqError("iallocator is not allowed to be set on"
                                   " instance objects", errors.ECODE_INVAL)
      nodes.append(bool(inst.pnode))
      if inst.disk_template in constants.DTS_INT_MIRROR:
        nodes.append(bool(inst.snode))

    has_nodes = compat.any(nodes)
    if compat.all(nodes) ^ has_nodes:
      raise errors.OpPrereqError("There are instance objects providing"
                                 " pnode/snode while others do not",
                                 errors.ECODE_INVAL)

    if not has_nodes and self.op.iallocator is None:
      default_iallocator = self.cfg.GetDefaultIAllocator()
      if default_iallocator:
        self.op.iallocator = default_iallocator
      else:
        raise errors.OpPrereqError("No iallocator or nodes on the instances"
                                   " given and no cluster-wide default"
                                   " iallocator found; please specify either"
                                   " an iallocator or nodes on the instances"
                                   " or set a cluster-wide default iallocator",
                                   errors.ECODE_INVAL)

    _CheckOpportunisticLocking(self.op)

    dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
    if dups:
      raise errors.OpPrereqError("There are duplicate instance names: %s" %
                                 utils.CommaJoin(dups), errors.ECODE_INVAL)

  def ExpandNames(self):
    """Calculate the locks.

    """
    self.share_locks = ShareAll()
    self.needed_locks = {
      # iallocator will select nodes and even if no iallocator is used,
      # collisions with LUInstanceCreate should be avoided
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
      }

    if self.op.iallocator:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
      self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET

      if self.op.opportunistic_locking:
        self.opportunistic_locks[locking.LEVEL_NODE] = True
        self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
    else:
      nodeslist = []
      for inst in self.op.instances:
        (inst.pnode_uuid, inst.pnode) = \
          ExpandNodeUuidAndName(self.cfg, inst.pnode_uuid, inst.pnode)
        nodeslist.append(inst.pnode_uuid)
        if inst.snode is not None:
          (inst.snode_uuid, inst.snode) = \
            ExpandNodeUuidAndName(self.cfg, inst.snode_uuid, inst.snode)
          nodeslist.append(inst.snode_uuid)

      self.needed_locks[locking.LEVEL_NODE] = nodeslist
      # Lock resources of instance's primary and secondary nodes (copy to
      # prevent accidental modification)
      self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)

  def CheckPrereq(self):
    """Check prerequisite.

    """
    if self.op.iallocator:
      cluster = self.cfg.GetClusterInfo()
      default_vg = self.cfg.GetVGName()
      ec_id = self.proc.GetECId()

      if self.op.opportunistic_locking:
        # Only consider nodes for which a lock is held
        node_whitelist = self.cfg.GetNodeNames(
                           list(self.owned_locks(locking.LEVEL_NODE)))
      else:
        node_whitelist = None

      insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
                                           _ComputeNics(op, cluster, None,
                                                        self.cfg, ec_id),
                                           _ComputeFullBeParams(op, cluster),
                                           node_whitelist)
               for op in self.op.instances]

      req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
      ial = iallocator.IAllocator(self.cfg, self.rpc, req)

      ial.Run(self.op.iallocator)

      if not ial.success:
        raise errors.OpPrereqError("Can't compute nodes using"
                                   " iallocator '%s': %s" %
                                   (self.op.iallocator, ial.info),
                                   errors.ECODE_NORES)

      self.ia_result = ial.result

    if self.op.dry_run:
      self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
        constants.JOB_IDS_KEY: [],
        })

  def _ConstructPartialResult(self):
    """Constructs the partial result.

    """
    if self.op.iallocator:
      (allocatable, failed_insts) = self.ia_result
      allocatable_insts = map(compat.fst, allocatable)
    else:
      allocatable_insts = [op.instance_name for op in self.op.instances]
      failed_insts = []

    return {
      constants.ALLOCATABLE_KEY: allocatable_insts,
      constants.FAILED_KEY: failed_insts,
      }
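    # Illustrative sketch, not part of the original code: with an iallocator
    # result, the dict returned above might look like (names hypothetical):
    #   {constants.ALLOCATABLE_KEY: ["inst1", "inst2"],
    #    constants.FAILED_KEY: ["inst3"]}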

  def Exec(self, feedback_fn):
    """Executes the opcode.

    """
    jobs = []
    if self.op.iallocator:
      op2inst = dict((op.instance_name, op) for op in self.op.instances)
      (allocatable, failed) = self.ia_result

      for (name, node_names) in allocatable:
        op = op2inst.pop(name)

        (op.pnode_uuid, op.pnode) = \
          ExpandNodeUuidAndName(self.cfg, None, node_names[0])
        if len(node_names) > 1:
          (op.snode_uuid, op.snode) = \
            ExpandNodeUuidAndName(self.cfg, None, node_names[1])

        jobs.append([op])

      missing = set(op2inst.keys()) - set(failed)
      assert not missing, \
        "Iallocator did return incomplete result: %s" % \
        utils.CommaJoin(missing)
    else:
      jobs.extend([op] for op in self.op.instances)

    return ResultWithJobs(jobs, **self._ConstructPartialResult())


class _InstNicModPrivate:
  """Data structure for network interface modifications.

  Used by L{LUInstanceSetParams}.

  """
  def __init__(self):
    self.params = None
    self.filled = None


def _PrepareContainerMods(mods, private_fn):
  """Prepares a list of container modifications by adding a private data field.

  @type mods: list of tuples; (operation, index, parameters)
  @param mods: List of modifications
  @type private_fn: callable or None
  @param private_fn: Callable for constructing a private data field for a
    modification
  @rtype: list

  """
  if private_fn is None:
    fn = lambda: None
  else:
    fn = private_fn

  return [(op, idx, params, fn()) for (op, idx, params) in mods]
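
# Illustrative sketch, not part of the original code: given
#   mods = [(constants.DDM_ADD, -1, {"size": 1024})]
# and private_fn=_InstNicModPrivate, _PrepareContainerMods returns
#   [(constants.DDM_ADD, -1, {"size": 1024}, <_InstNicModPrivate object>)],
# i.e. each modification tuple gains a freshly constructed private object.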


def _CheckNodesPhysicalCPUs(lu, node_uuids, requested, hypervisor_specs):
  """Checks if nodes have enough physical CPUs.

  This function checks if all given nodes have the needed number of
  physical CPUs. In case any node has fewer CPUs or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type node_uuids: C{list}
  @param node_uuids: the list of node UUIDs to check
  @type requested: C{int}
  @param requested: the minimum acceptable number of physical CPUs
  @type hypervisor_specs: list of pairs (string, dict of strings)
  @param hypervisor_specs: list of hypervisor specifications in
      pairs (hypervisor_name, hvparams)
  @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
      or we cannot check the node

  """
  nodeinfo = lu.rpc.call_node_info(node_uuids, None, hypervisor_specs)
  for node_uuid in node_uuids:
    info = nodeinfo[node_uuid]
    node_name = lu.cfg.GetNodeName(node_uuid)
    info.Raise("Cannot get current information from node %s" % node_name,
               prereq=True, ecode=errors.ECODE_ENVIRON)
    (_, _, (hv_info, )) = info.payload
    num_cpus = hv_info.get("cpu_total", None)
    if not isinstance(num_cpus, int):
      raise errors.OpPrereqError("Can't compute the number of physical CPUs"
                                 " on node %s, result was '%s'" %
                                 (node_name, num_cpus), errors.ECODE_ENVIRON)
    if requested > num_cpus:
      raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
                                 "required" % (node_name, num_cpus, requested),
                                 errors.ECODE_NORES)
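
# Illustrative sketch, not part of the original code: hypervisor_specs is a
# list of (hypervisor_name, hvparams) pairs, so a hypothetical caller could
# require at least four physical CPUs on the primary node with
#   _CheckNodesPhysicalCPUs(self, [pnode.uuid], 4,
#                           [(self.op.hypervisor, self.op.hvparams)])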


def GetItemFromContainer(identifier, kind, container):
  """Return the item referred to by the identifier.

  @type identifier: string
  @param identifier: Item index or name or UUID
  @type kind: string
  @param kind: One-word item description
  @type container: list
  @param container: Container to get the item from

  """
  # Index
  try:
    idx = int(identifier)
    if idx == -1:
      # Append
      absidx = len(container) - 1
    elif idx < 0:
      raise IndexError("Not accepting negative indices other than -1")
    elif idx > len(container):
      raise IndexError("Got %s index %s, but there are only %s" %
                       (kind, idx, len(container)))
    else:
      absidx = idx
    return (absidx, container[idx])
  except ValueError:
    pass

  for idx, item in enumerate(container):
    if item.uuid == identifier or item.name == identifier:
      return (idx, item)

  raise errors.OpPrereqError("Cannot find %s with identifier %s" %
                             (kind, identifier), errors.ECODE_NOENT)
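
# Illustrative sketch, not part of the original code: items can be addressed
# by index, by name or by UUID, and -1 means the last item, e.g.
#   GetItemFromContainer("0", "disk", instance.disks)  -> (0, first disk)
#   GetItemFromContainer("-1", "nic", instance.nics)   -> last NIC and index
#   GetItemFromContainer(uuid_str, "disk", instance.disks) looks up by UUID.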


def _ApplyContainerMods(kind, container, chgdesc, mods,
                        create_fn, modify_fn, remove_fn,
                        post_add_fn=None):
  """Applies descriptions in C{mods} to C{container}.

  @type kind: string
  @param kind: One-word item description
  @type container: list
  @param container: Container to modify
  @type chgdesc: None or list
  @param chgdesc: List of applied changes
  @type mods: list
  @param mods: Modifications as returned by L{_PrepareContainerMods}
  @type create_fn: callable
  @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
    receives absolute item index, parameters and private data object as added
    by L{_PrepareContainerMods}, returns tuple containing new item and changes
    as list
  @type modify_fn: callable
  @param modify_fn: Callback for modifying an existing item
    (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
    and private data object as added by L{_PrepareContainerMods}, returns
    changes as list
  @type remove_fn: callable
  @param remove_fn: Callback on removing item; receives absolute item index,
    item and private data object as added by L{_PrepareContainerMods}
  @type post_add_fn: callable
  @param post_add_fn: Callable for post-processing a newly created item after
    it has been put into the container. It receives the index of the new item
    and the new item as parameters.

  """
  for (op, identifier, params, private) in mods:
    changes = None

    if op == constants.DDM_ADD:
      # Calculate where item will be added
      # When adding an item, identifier can only be an index
      try:
        idx = int(identifier)
      except ValueError:
        raise errors.OpPrereqError("Only positive integer or -1 is accepted as"
                                   " identifier for %s" % constants.DDM_ADD,
                                   errors.ECODE_INVAL)
      if idx == -1:
        addidx = len(container)
      else:
        if idx < 0:
          raise IndexError("Not accepting negative indices other than -1")
        elif idx > len(container):
          raise IndexError("Got %s index %s, but there are only %s" %
                           (kind, idx, len(container)))
        addidx = idx

      if create_fn is None:
        item = params
      else:
        (item, changes) = create_fn(addidx, params, private)

      if idx == -1:
        container.append(item)
      else:
        assert idx >= 0
        assert idx <= len(container)
        # list.insert does so before the specified index
        container.insert(idx, item)

      if post_add_fn is not None:
        post_add_fn(addidx, item)

    else:
      # Retrieve existing item
      (absidx, item) = GetItemFromContainer(identifier, kind, container)

      if op == constants.DDM_REMOVE:
        assert not params

        changes = [("%s/%s" % (kind, absidx), "remove")]

        if remove_fn is not None:
          msg = remove_fn(absidx, item, private)
          if msg:
            changes.append(("%s/%s" % (kind, absidx), msg))

        assert container[absidx] == item
        del container[absidx]
      elif op == constants.DDM_MODIFY:
        if modify_fn is not None:
          changes = modify_fn(absidx, item, params, private)
      else:
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)

    assert _TApplyContModsCbChanges(changes)

    if not (chgdesc is None or changes is None):
      chgdesc.extend(changes)
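
# Illustrative sketch, not part of the original code: with all callbacks set
# to None the parameters are used verbatim, e.g.
#   container = ["a"]
#   mods = _PrepareContainerMods([(constants.DDM_ADD, -1, "b")], None)
#   _ApplyContainerMods("test", container, None, mods, None, None, None)
# leaves container == ["a", "b"]; the instance LUs pass real callbacks that
# build disk and NIC objects instead.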


def _UpdateIvNames(base_index, disks):
  """Updates the C{iv_name} attribute of disks.

  @type disks: list of L{objects.Disk}

  """
  for (idx, disk) in enumerate(disks):
    disk.iv_name = "disk/%s" % (base_index + idx, )


class LUInstanceSetParams(LogicalUnit):
  """Modifies an instance's parameters.

  """
  HPATH = "instance-modify"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  @staticmethod
  def _UpgradeDiskNicMods(kind, mods, verify_fn):
    assert ht.TList(mods)
    assert not mods or len(mods[0]) in (2, 3)

    if mods and len(mods[0]) == 2:
      result = []

      addremove = 0
      for op, params in mods:
        if op in (constants.DDM_ADD, constants.DDM_REMOVE):
          result.append((op, -1, params))
          addremove += 1

          if addremove > 1:
            raise errors.OpPrereqError("Only one %s add or remove operation is"
                                       " supported at a time" % kind,
                                       errors.ECODE_INVAL)
        else:
          result.append((constants.DDM_MODIFY, op, params))

      assert verify_fn(result)
    else:
      result = mods

    return result
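    # Illustrative sketch, not part of the original code: a legacy two-element
    # modification like [(constants.DDM_ADD, {"size": 1024})] is upgraded to
    # [(constants.DDM_ADD, -1, {"size": 1024})], and [(0, {"mode": "rw"})]
    # becomes [(constants.DDM_MODIFY, 0, {"mode": "rw"})].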

  @staticmethod
  def _CheckMods(kind, mods, key_types, item_fn):
    """Ensures requested disk/NIC modifications are valid.

    """
    for (op, _, params) in mods:
      assert ht.TDict(params)

      # If 'key_types' is an empty dict, we assume we have an
      # 'ext' template and thus do not ForceDictType
      if key_types:
        utils.ForceDictType(params, key_types)

      if op == constants.DDM_REMOVE:
        if params:
          raise errors.OpPrereqError("No settings should be passed when"
                                     " removing a %s" % kind,
                                     errors.ECODE_INVAL)
      elif op in (constants.DDM_ADD, constants.DDM_MODIFY):
        item_fn(op, params)
      else:
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)

  @staticmethod
  def _VerifyDiskModification(op, params, excl_stor):
    """Verifies a disk modification.

    """
    if op == constants.DDM_ADD:
      mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
      if mode not in constants.DISK_ACCESS_SET:
        raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
                                   errors.ECODE_INVAL)

      size = params.get(constants.IDISK_SIZE, None)
      if size is None:
        raise errors.OpPrereqError("Required disk parameter '%s' missing" %
                                   constants.IDISK_SIZE, errors.ECODE_INVAL)
      size = int(size)

      params[constants.IDISK_SIZE] = size
      name = params.get(constants.IDISK_NAME, None)
      if name is not None and name.lower() == constants.VALUE_NONE:
        params[constants.IDISK_NAME] = None

      CheckSpindlesExclusiveStorage(params, excl_stor, True)

    elif op == constants.DDM_MODIFY:
      if constants.IDISK_SIZE in params:
        raise errors.OpPrereqError("Disk size change not possible, use"
                                   " grow-disk", errors.ECODE_INVAL)
      if len(params) > 2:
        raise errors.OpPrereqError("Disk modification doesn't support"
                                   " additional arbitrary parameters",
                                   errors.ECODE_INVAL)
      name = params.get(constants.IDISK_NAME, None)
      if name is not None and name.lower() == constants.VALUE_NONE:
        params[constants.IDISK_NAME] = None

  @staticmethod
  def _VerifyNicModification(op, params):
    """Verifies a network interface modification.

    """
    if op in (constants.DDM_ADD, constants.DDM_MODIFY):
      ip = params.get(constants.INIC_IP, None)
      name = params.get(constants.INIC_NAME, None)
      req_net = params.get(constants.INIC_NETWORK, None)
      link = params.get(constants.NIC_LINK, None)
      mode = params.get(constants.NIC_MODE, None)
      if name is not None and name.lower() == constants.VALUE_NONE:
        params[constants.INIC_NAME] = None
      if req_net is not None:
        if req_net.lower() == constants.VALUE_NONE:
          params[constants.INIC_NETWORK] = None
          req_net = None
        elif link is not None or mode is not None:
          raise errors.OpPrereqError("If network is given, mode or link"
                                     " should not be set",
                                     errors.ECODE_INVAL)

      if op == constants.DDM_ADD:
        macaddr = params.get(constants.INIC_MAC, None)
        if macaddr is None:
          params[constants.INIC_MAC] = constants.VALUE_AUTO

      if ip is not None:
        if ip.lower() == constants.VALUE_NONE:
          params[constants.INIC_IP] = None
        else:
          if ip.lower() == constants.NIC_IP_POOL:
            if op == constants.DDM_ADD and req_net is None:
              raise errors.OpPrereqError("If ip=pool, parameter network"
                                         " cannot be none",
                                         errors.ECODE_INVAL)
          else:
            if not netutils.IPAddress.IsValid(ip):
              raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                         errors.ECODE_INVAL)

      if constants.INIC_MAC in params:
        macaddr = params[constants.INIC_MAC]
        if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
          macaddr = utils.NormalizeAndValidateMac(macaddr)

        if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
                                     " modifying an existing NIC",
                                     errors.ECODE_INVAL)

  def CheckArguments(self):
2403
    if not (self.op.nics or self.op.disks or self.op.disk_template or
2404
            self.op.hvparams or self.op.beparams or self.op.os_name or
2405
            self.op.osparams or self.op.offline is not None or
2406
            self.op.runtime_mem or self.op.pnode):
2407
      raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
2408

    
2409
    if self.op.hvparams:
2410
      CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
2411
                           "hypervisor", "instance", "cluster")
2412

    
2413
    self.op.disks = self._UpgradeDiskNicMods(
2414
      "disk", self.op.disks, ht.TSetParamsMods(ht.TIDiskParams))
2415
    self.op.nics = self._UpgradeDiskNicMods(
2416
      "NIC", self.op.nics, ht.TSetParamsMods(ht.TINicParams))
2417

    
2418
    if self.op.disks and self.op.disk_template is not None:
2419
      raise errors.OpPrereqError("Disk template conversion and other disk"
2420
                                 " changes not supported at the same time",
2421
                                 errors.ECODE_INVAL)
2422

    
2423
    if (self.op.disk_template and
2424
        self.op.disk_template in constants.DTS_INT_MIRROR and
2425
        self.op.remote_node is None):
2426
      raise errors.OpPrereqError("Changing the disk template to a mirrored"
2427
                                 " one requires specifying a secondary node",
2428
                                 errors.ECODE_INVAL)
2429

    
2430
    # Check NIC modifications
2431
    self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
2432
                    self._VerifyNicModification)
2433

    
2434
    if self.op.pnode:
2435
      (self.op.pnode_uuid, self.op.pnode) = \
2436
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
2437

    
2438
  def ExpandNames(self):
2439
    self._ExpandAndLockInstance()
2440
    self.needed_locks[locking.LEVEL_NODEGROUP] = []
2441
    # Can't even acquire node locks in shared mode as upcoming changes in
2442
    # Ganeti 2.6 will start to modify the node object on disk conversion
2443
    self.needed_locks[locking.LEVEL_NODE] = []
2444
    self.needed_locks[locking.LEVEL_NODE_RES] = []
2445
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2446
    # Look node group to look up the ipolicy
2447
    self.share_locks[locking.LEVEL_NODEGROUP] = 1
2448

    
2449
  def DeclareLocks(self, level):
2450
    if level == locking.LEVEL_NODEGROUP:
2451
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
2452
      # Acquire locks for the instance's nodegroups optimistically. Needs
2453
      # to be verified in CheckPrereq
2454
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
2455
        self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
2456
    elif level == locking.LEVEL_NODE:
2457
      self._LockInstancesNodes()
2458
      if self.op.disk_template and self.op.remote_node:
2459
        (self.op.remote_node_uuid, self.op.remote_node) = \
2460
          ExpandNodeUuidAndName(self.cfg, self.op.remote_node_uuid,
2461
                                self.op.remote_node)
2462
        self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node_uuid)
2463
    elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
2464
      # Copy node locks
2465
      self.needed_locks[locking.LEVEL_NODE_RES] = \
2466
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
2467

    
2468
  def BuildHooksEnv(self):
2469
    """Build hooks env.
2470

2471
    This runs on the master, primary and secondaries.
2472

2473
    """
2474
    args = {}
2475
    if constants.BE_MINMEM in self.be_new:
2476
      args["minmem"] = self.be_new[constants.BE_MINMEM]
2477
    if constants.BE_MAXMEM in self.be_new:
2478
      args["maxmem"] = self.be_new[constants.BE_MAXMEM]
2479
    if constants.BE_VCPUS in self.be_new:
2480
      args["vcpus"] = self.be_new[constants.BE_VCPUS]
2481
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
2482
    # information at all.
2483

    
2484
    if self._new_nics is not None:
2485
      nics = []
2486

    
2487
      for nic in self._new_nics:
2488
        n = copy.deepcopy(nic)
2489
        nicparams = self.cluster.SimpleFillNIC(n.nicparams)
2490
        n.nicparams = nicparams
2491
        nics.append(NICToTuple(self, n))
2492

    
2493
      args["nics"] = nics
2494

    
2495
    env = BuildInstanceHookEnvByObject(self, self.instance, override=args)
2496
    if self.op.disk_template:
2497
      env["NEW_DISK_TEMPLATE"] = self.op.disk_template
2498
    if self.op.runtime_mem:
2499
      env["RUNTIME_MEMORY"] = self.op.runtime_mem
2500

    
2501
    return env
2502

    
2503
  def BuildHooksNodes(self):
2504
    """Build hooks nodes.
2505

2506
    """
2507
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
2508
    return (nl, nl)
2509

    
2510
  def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
                              old_params, cluster, pnode_uuid):

    update_params_dict = dict([(key, params[key])
                               for key in constants.NICS_PARAMETERS
                               if key in params])

    req_link = update_params_dict.get(constants.NIC_LINK, None)
    req_mode = update_params_dict.get(constants.NIC_MODE, None)

    new_net_uuid = None
    new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid)
    if new_net_uuid_or_name:
      new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name)
      new_net_obj = self.cfg.GetNetwork(new_net_uuid)

    if old_net_uuid:
      old_net_obj = self.cfg.GetNetwork(old_net_uuid)

    if new_net_uuid:
      netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode_uuid)
      if not netparams:
        raise errors.OpPrereqError("No netparams found for the network"
                                   " %s, probably not connected" %
                                   new_net_obj.name, errors.ECODE_INVAL)
      new_params = dict(netparams)
    else:
      new_params = GetUpdatedParams(old_params, update_params_dict)

    utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)

    new_filled_params = cluster.SimpleFillNIC(new_params)
    objects.NIC.CheckParameterSyntax(new_filled_params)

    new_mode = new_filled_params[constants.NIC_MODE]
    if new_mode == constants.NIC_MODE_BRIDGED:
      bridge = new_filled_params[constants.NIC_LINK]
      msg = self.rpc.call_bridges_exist(pnode_uuid, [bridge]).fail_msg
      if msg:
        msg = "Error checking bridges on node '%s': %s" % \
                (self.cfg.GetNodeName(pnode_uuid), msg)
        if self.op.force:
          self.warn.append(msg)
        else:
          raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)

    elif new_mode == constants.NIC_MODE_ROUTED:
      ip = params.get(constants.INIC_IP, old_ip)
      if ip is None:
        raise errors.OpPrereqError("Cannot set the NIC IP address to None"
                                   " on a routed NIC", errors.ECODE_INVAL)

    elif new_mode == constants.NIC_MODE_OVS:
      # TODO: check OVS link
      self.LogInfo("OVS links are currently not checked for correctness")

    if constants.INIC_MAC in params:
      mac = params[constants.INIC_MAC]
      if mac is None:
        raise errors.OpPrereqError("Cannot unset the NIC MAC address",
                                   errors.ECODE_INVAL)
      elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        # otherwise generate the MAC address
        params[constants.INIC_MAC] = \
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
      else:
        # or validate/reserve the current one
        try:
          self.cfg.ReserveMAC(mac, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("MAC address '%s' already in use"
                                     " in cluster" % mac,
                                     errors.ECODE_NOTUNIQUE)
    elif new_net_uuid != old_net_uuid:

      def get_net_prefix(net_uuid):
        mac_prefix = None
        if net_uuid:
          nobj = self.cfg.GetNetwork(net_uuid)
          mac_prefix = nobj.mac_prefix

        return mac_prefix

      new_prefix = get_net_prefix(new_net_uuid)
      old_prefix = get_net_prefix(old_net_uuid)
      if old_prefix != new_prefix:
        params[constants.INIC_MAC] = \
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())

    # if there is a change in (ip, network) tuple
    new_ip = params.get(constants.INIC_IP, old_ip)
    if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
      if new_ip:
        # if IP is pool then require a network and generate one IP
        if new_ip.lower() == constants.NIC_IP_POOL:
          if new_net_uuid:
            try:
              new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("Unable to get a free IP"
                                         " from the address pool",
                                         errors.ECODE_STATE)
            self.LogInfo("Chose IP %s from network %s",
                         new_ip,
                         new_net_obj.name)
            params[constants.INIC_IP] = new_ip
          else:
            raise errors.OpPrereqError("ip=pool, but no network found",
                                       errors.ECODE_INVAL)
        # Reserve the new IP in the new network, if any
        elif new_net_uuid:
          try:
            self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId())
            self.LogInfo("Reserving IP %s in network %s",
                         new_ip, new_net_obj.name)
          except errors.ReservationError:
            raise errors.OpPrereqError("IP %s not available in network %s" %
                                       (new_ip, new_net_obj.name),
                                       errors.ECODE_NOTUNIQUE)
        # new network is None so check if new IP is a conflicting IP
        elif self.op.conflicts_check:
          _CheckForConflictingIp(self, new_ip, pnode_uuid)

      # release old IP if old network is not None
      if old_ip and old_net_uuid:
        try:
          self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId())
        except errors.AddressPoolError:
          logging.warning("Release IP %s not contained in network %s",
                          old_ip, old_net_obj.name)

    # there are no changes in (ip, network) tuple and old network is not None
    elif (old_net_uuid is not None and
          (req_link is not None or req_mode is not None)):
      raise errors.OpPrereqError("Not allowed to change link or mode of"
                                 " a NIC that is connected to a network",
                                 errors.ECODE_INVAL)

    private.params = new_params
    private.filled = new_filled_params

  def _PreCheckDiskTemplate(self, pnode_info):
    """CheckPrereq checks related to a new disk template."""
    # Arguments are passed to avoid configuration lookups
    pnode_uuid = self.instance.primary_node
    if self.instance.disk_template == self.op.disk_template:
      raise errors.OpPrereqError("Instance already has disk template %s" %
                                 self.instance.disk_template,
                                 errors.ECODE_INVAL)

    if not self.cluster.IsDiskTemplateEnabled(self.op.disk_template):
      raise errors.OpPrereqError("Disk template '%s' is not enabled for this"
                                 " cluster." % self.op.disk_template)

    if (self.instance.disk_template,
        self.op.disk_template) not in self._DISK_CONVERSIONS:
      raise errors.OpPrereqError("Unsupported disk template conversion from"
                                 " %s to %s" % (self.instance.disk_template,
                                                self.op.disk_template),
                                 errors.ECODE_INVAL)
    CheckInstanceState(self, self.instance, INSTANCE_DOWN,
                       msg="cannot change disk template")
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.remote_node_uuid == pnode_uuid:
        raise errors.OpPrereqError("Given new secondary node %s is the same"
                                   " as the primary node of the instance" %
                                   self.op.remote_node, errors.ECODE_STATE)
      CheckNodeOnline(self, self.op.remote_node_uuid)
      CheckNodeNotDrained(self, self.op.remote_node_uuid)
      # FIXME: here we assume that the old instance type is DT_PLAIN
      assert self.instance.disk_template == constants.DT_PLAIN
      disks = [{constants.IDISK_SIZE: d.size,
                constants.IDISK_VG: d.logical_id[0]}
               for d in self.instance.disks]
      required = ComputeDiskSizePerVG(self.op.disk_template, disks)
      CheckNodesFreeDiskPerVG(self, [self.op.remote_node_uuid], required)

      snode_info = self.cfg.GetNodeInfo(self.op.remote_node_uuid)
      snode_group = self.cfg.GetNodeGroup(snode_info.group)
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
                                                              snode_group)
      CheckTargetNodeIPolicy(self, ipolicy, self.instance, snode_info, self.cfg,
                             ignore=self.op.ignore_ipolicy)
      if pnode_info.group != snode_info.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    if not self.op.disk_template in constants.DTS_EXCL_STORAGE:
      # Make sure none of the nodes require exclusive storage
      nodes = [pnode_info]
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        assert snode_info
        nodes.append(snode_info)
      has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
      if compat.any(map(has_es, nodes)):
        errmsg = ("Cannot convert disk template from %s to %s when exclusive"
                  " storage is enabled" % (self.instance.disk_template,
                                           self.op.disk_template))
        raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)

  def _PreCheckDisks(self, ispec):
    """CheckPrereq checks related to disk changes.

    @type ispec: dict
    @param ispec: instance specs to be updated with the new disks

    """
    self.diskparams = self.cfg.GetInstanceDiskParams(self.instance)

    excl_stor = compat.any(
      rpc.GetExclusiveStorageForNodes(self.cfg,
                                      self.instance.all_nodes).values()
      )

    # Check disk modifications. This is done here and not in CheckArguments
    # (as with NICs), because we need to know the instance's disk template
    ver_fn = lambda op, par: self._VerifyDiskModification(op, par, excl_stor)
    if self.instance.disk_template == constants.DT_EXT:
      self._CheckMods("disk", self.op.disks, {}, ver_fn)
    else:
      self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
                      ver_fn)

    self.diskmod = _PrepareContainerMods(self.op.disks, None)

    # Check the validity of the `provider' parameter
    if self.instance.disk_template in constants.DT_EXT:
      for mod in self.diskmod:
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
        if mod[0] == constants.DDM_ADD:
          if ext_provider is None:
            raise errors.OpPrereqError("Instance template is '%s' and parameter"
                                       " '%s' missing, during disk add" %
                                       (constants.DT_EXT,
                                        constants.IDISK_PROVIDER),
                                       errors.ECODE_NOENT)
        elif mod[0] == constants.DDM_MODIFY:
          if ext_provider:
            raise errors.OpPrereqError("Parameter '%s' is invalid during disk"
                                       " modification" %
                                       constants.IDISK_PROVIDER,
                                       errors.ECODE_INVAL)
    else:
      for mod in self.diskmod:
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
        if ext_provider is not None:
          raise errors.OpPrereqError("Parameter '%s' is only valid for"
                                     " instances of type '%s'" %
                                     (constants.IDISK_PROVIDER,
                                      constants.DT_EXT),
                                     errors.ECODE_INVAL)

    if not self.op.wait_for_sync and self.instance.disks_active:
      for mod in self.diskmod:
        if mod[0] == constants.DDM_ADD:
          raise errors.OpPrereqError("Can't add a disk to an instance with"
                                     " activated disks and"
                                     " --no-wait-for-sync given.",
                                     errors.ECODE_INVAL)

    if self.op.disks and self.instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Disk operations not supported for"
                                 " diskless instances", errors.ECODE_INVAL)

    def _PrepareDiskMod(_, disk, params, __):
      disk.name = params.get(constants.IDISK_NAME, None)

    # Verify disk changes (operating on a copy)
    disks = copy.deepcopy(self.instance.disks)
    _ApplyContainerMods("disk", disks, None, self.diskmod, None,
                        _PrepareDiskMod, None)
    utils.ValidateDeviceNames("disk", disks)
    if len(disks) > constants.MAX_DISKS:
      raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
                                 " more" % constants.MAX_DISKS,
                                 errors.ECODE_STATE)
    disk_sizes = [disk.size for disk in self.instance.disks]
    disk_sizes.extend(params["size"] for (op, idx, params, private) in
                      self.diskmod if op == constants.DDM_ADD)
    ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
    ispec[constants.ISPEC_DISK_SIZE] = disk_sizes

    if self.op.offline is not None and self.op.offline:
      CheckInstanceState(self, self.instance, CAN_CHANGE_INSTANCE_OFFLINE,
                         msg="can't change to offline")

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the requested parameter changes (disks, NICs, hypervisor,
    backend, OS and memory parameters) against the instance's current state,
    its nodes and the cluster/group policy.

    """
    assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    self.cluster = self.cfg.GetClusterInfo()
    cluster_hvparams = self.cluster.hvparams[self.instance.hypervisor]

    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    pnode_uuid = self.instance.primary_node

    self.warn = []

    if (self.op.pnode_uuid is not None and self.op.pnode_uuid != pnode_uuid and
        not self.op.force):
      # verify that the instance is not up
      instance_info = self.rpc.call_instance_info(
          pnode_uuid, self.instance.name, self.instance.hypervisor,
          cluster_hvparams)
      if instance_info.fail_msg:
        self.warn.append("Can't get instance runtime information: %s" %
                         instance_info.fail_msg)
      elif instance_info.payload:
        raise errors.OpPrereqError("Instance is still running on %s" %
                                   self.cfg.GetNodeName(pnode_uuid),
                                   errors.ECODE_STATE)

    assert pnode_uuid in self.owned_locks(locking.LEVEL_NODE)
    node_uuids = list(self.instance.all_nodes)
    pnode_info = self.cfg.GetNodeInfo(pnode_uuid)

    #_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
    assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
    group_info = self.cfg.GetNodeGroup(pnode_info.group)

    # dictionary with instance information after the modification
    ispec = {}

    if self.op.hotplug:
      result = self.rpc.call_hotplug_supported(self.instance.primary_node,
                                               self.instance)
      result.Raise("Hotplug is not supported.")

    # Prepare NIC modifications
    self.nicmod = _PrepareContainerMods(self.op.nics, _InstNicModPrivate)

    # OS change
    if self.op.os_name and not self.op.force:
      CheckNodeHasOS(self, self.instance.primary_node, self.op.os_name,
                     self.op.force_variant)
      instance_os = self.op.os_name
    else:
      instance_os = self.instance.os

    assert not (self.op.disk_template and self.op.disks), \
      "Can't modify disk template and apply disk changes at the same time"

    if self.op.disk_template:
      self._PreCheckDiskTemplate(pnode_info)

    self._PreCheckDisks(ispec)

    # hvparams processing
    if self.op.hvparams:
      hv_type = self.instance.hypervisor
      i_hvdict = GetUpdatedParams(self.instance.hvparams, self.op.hvparams)
      utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
      hv_new = self.cluster.SimpleFillHV(hv_type, self.instance.os, i_hvdict)

      # local check
      hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
      CheckHVParams(self, node_uuids, self.instance.hypervisor, hv_new)
      self.hv_proposed = self.hv_new = hv_new # the new actual values
      self.hv_inst = i_hvdict # the new dict (without defaults)
    else:
      self.hv_proposed = self.cluster.SimpleFillHV(self.instance.hypervisor,
                                                   self.instance.os,
                                                   self.instance.hvparams)
      self.hv_new = self.hv_inst = {}

    # beparams processing
    if self.op.beparams:
      i_bedict = GetUpdatedParams(self.instance.beparams, self.op.beparams,
                                  use_none=True)
      objects.UpgradeBeParams(i_bedict)
      utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
      be_new = self.cluster.SimpleFillBE(i_bedict)
      self.be_proposed = self.be_new = be_new # the new actual values
      self.be_inst = i_bedict # the new dict (without defaults)
    else:
      self.be_new = self.be_inst = {}
      self.be_proposed = self.cluster.SimpleFillBE(self.instance.beparams)
    be_old = self.cluster.FillBE(self.instance)

    # CPU param validation -- checking every time a parameter is
    # changed to cover all cases where either CPU mask or vcpus have
    # changed
    if (constants.BE_VCPUS in self.be_proposed and
        constants.HV_CPU_MASK in self.hv_proposed):
      cpu_list = \
        utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
      # Verify mask is consistent with number of vCPUs. Can skip this
      # test if only 1 entry in the CPU mask, which means same mask
      # is applied to all vCPUs.
      if (len(cpu_list) > 1 and
          len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
        raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
                                   " CPU mask [%s]" %
                                   (self.be_proposed[constants.BE_VCPUS],
                                    self.hv_proposed[constants.HV_CPU_MASK]),
                                   errors.ECODE_INVAL)

      # Only perform this test if a new CPU mask is given
      if constants.HV_CPU_MASK in self.hv_new:
        # Calculate the largest CPU number requested
        max_requested_cpu = max(map(max, cpu_list))
        # Check that all of the instance's nodes have enough physical CPUs to
        # satisfy the requested CPU mask
        hvspecs = [(self.instance.hypervisor,
                    self.cfg.GetClusterInfo()
                      .hvparams[self.instance.hypervisor])]
        _CheckNodesPhysicalCPUs(self, self.instance.all_nodes,
                                max_requested_cpu + 1,
                                hvspecs)

    # osparams processing
    if self.op.osparams:
      i_osdict = GetUpdatedParams(self.instance.osparams, self.op.osparams)
      CheckOSParams(self, True, node_uuids, instance_os, i_osdict)
      self.os_inst = i_osdict # the new dict (without defaults)
    else:
      self.os_inst = {}

    #TODO(dynmem): do the appropriate check involving MINMEM
    if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
        be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
      mem_check_list = [pnode_uuid]
      if be_new[constants.BE_AUTO_BALANCE]:
        # either we changed auto_balance to yes or it was from before
        mem_check_list.extend(self.instance.secondary_nodes)
      instance_info = self.rpc.call_instance_info(
          pnode_uuid, self.instance.name, self.instance.hypervisor,
          cluster_hvparams)
      hvspecs = [(self.instance.hypervisor,
                  cluster_hvparams)]
      nodeinfo = self.rpc.call_node_info(mem_check_list, None,
                                         hvspecs)
      pninfo = nodeinfo[pnode_uuid]
      msg = pninfo.fail_msg
      if msg:
        # Assume the primary node is unreachable and go ahead
        self.warn.append("Can't get info from primary node %s: %s" %
                         (self.cfg.GetNodeName(pnode_uuid), msg))
      else:
        (_, _, (pnhvinfo, )) = pninfo.payload
        if not isinstance(pnhvinfo.get("memory_free", None), int):
          self.warn.append("Node data from primary node %s doesn't contain"
                           " free memory information" %
                           self.cfg.GetNodeName(pnode_uuid))
        elif instance_info.fail_msg:
          self.warn.append("Can't get instance runtime information: %s" %
                           instance_info.fail_msg)
        else:
          if instance_info.payload:
            current_mem = int(instance_info.payload["memory"])
          else:
            # Assume instance not running
            # (there is a slight race condition here, but it's not very
            # probable, and we have no other way to check)
            # TODO: Describe race condition
            current_mem = 0
          #TODO(dynmem): do the appropriate check involving MINMEM
          miss_mem = (be_new[constants.BE_MAXMEM] - current_mem -
                      pnhvinfo["memory_free"])
          if miss_mem > 0:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from starting, due to %d MB of memory"
                                       " missing on its primary node" %
                                       miss_mem, errors.ECODE_NORES)

      if be_new[constants.BE_AUTO_BALANCE]:
        for node_uuid, nres in nodeinfo.items():
          if node_uuid not in self.instance.secondary_nodes:
            continue
          nres.Raise("Can't get info from secondary node %s" %
                     self.cfg.GetNodeName(node_uuid), prereq=True,
                     ecode=errors.ECODE_STATE)
          (_, _, (nhvinfo, )) = nres.payload
          if not isinstance(nhvinfo.get("memory_free", None), int):
            raise errors.OpPrereqError("Secondary node %s didn't return free"
                                       " memory information" %
                                       self.cfg.GetNodeName(node_uuid),
                                       errors.ECODE_STATE)
          #TODO(dynmem): do the appropriate check involving MINMEM
          elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from failover to its secondary node"
                                       " %s, due to not enough memory" %
                                       self.cfg.GetNodeName(node_uuid),
                                       errors.ECODE_STATE)

    if self.op.runtime_mem:
      remote_info = self.rpc.call_instance_info(
         self.instance.primary_node, self.instance.name,
         self.instance.hypervisor,
         cluster_hvparams)
      remote_info.Raise("Error checking node %s" %
                        self.cfg.GetNodeName(self.instance.primary_node))
      if not remote_info.payload: # not running already
        raise errors.OpPrereqError("Instance %s is not running" %
                                   self.instance.name, errors.ECODE_STATE)

      current_memory = remote_info.payload["memory"]
      if (not self.op.force and
           (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
            self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
        raise errors.OpPrereqError("Instance %s must have memory between %d"
                                   " and %d MB of memory unless --force is"
                                   " given" %
                                   (self.instance.name,
                                    self.be_proposed[constants.BE_MINMEM],
                                    self.be_proposed[constants.BE_MAXMEM]),
                                   errors.ECODE_INVAL)

      delta = self.op.runtime_mem - current_memory
      if delta > 0:
        CheckNodeFreeMemory(
            self, self.instance.primary_node,
            "ballooning memory for instance %s" % self.instance.name, delta,
            self.instance.hypervisor,
            self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])

    # make self.cluster visible in the functions below
    cluster = self.cluster

    def _PrepareNicCreate(_, params, private):
      self._PrepareNicModification(params, private, None, None,
                                   {}, cluster, pnode_uuid)
      return (None, None)

    def _PrepareNicMod(_, nic, params, private):
      self._PrepareNicModification(params, private, nic.ip, nic.network,
                                   nic.nicparams, cluster, pnode_uuid)
      return None

    def _PrepareNicRemove(_, params, __):
      ip = params.ip
      net = params.network
      if net is not None and ip is not None:
        self.cfg.ReleaseIp(net, ip, self.proc.GetECId())

    # Verify NIC changes (operating on copy)
    nics = self.instance.nics[:]
    _ApplyContainerMods("NIC", nics, None, self.nicmod,
                        _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
    if len(nics) > constants.MAX_NICS:
      raise errors.OpPrereqError("Instance has too many network interfaces"
                                 " (%d), cannot add more" % constants.MAX_NICS,
                                 errors.ECODE_STATE)

    # Pre-compute NIC changes (necessary to use result in hooks)
    self._nic_chgdesc = []
    if self.nicmod:
      # Operate on copies as this is still in prereq
      nics = [nic.Copy() for nic in self.instance.nics]
      _ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
                          self._CreateNewNic, self._ApplyNicMods,
                          self._RemoveNic)
      # Verify that NIC names are unique and valid
      utils.ValidateDeviceNames("NIC", nics)
      self._new_nics = nics
      ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
    else:
      self._new_nics = None
      ispec[constants.ISPEC_NIC_COUNT] = len(self.instance.nics)

    if not self.op.ignore_ipolicy:
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
                                                              group_info)

      # Fill ispec with backend parameters
      ispec[constants.ISPEC_SPINDLE_USE] = \
        self.be_new.get(constants.BE_SPINDLE_USE, None)
      ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS,
                                                         None)

      # Copy ispec to verify parameters with min/max values separately
      if self.op.disk_template:
        new_disk_template = self.op.disk_template
      else:
        new_disk_template = self.instance.disk_template
      ispec_max = ispec.copy()
      ispec_max[constants.ISPEC_MEM_SIZE] = \
        self.be_new.get(constants.BE_MAXMEM, None)
      res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,
                                                     new_disk_template)
      ispec_min = ispec.copy()
      ispec_min[constants.ISPEC_MEM_SIZE] = \
        self.be_new.get(constants.BE_MINMEM, None)
      res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,
                                                     new_disk_template)

      if (res_max or res_min):
        # FIXME: Improve error message by including information about whether
        # the upper or lower limit of the parameter fails the ipolicy.
        msg = ("Instance allocation to group %s (%s) violates policy: %s" %
               (group_info, group_info.name,
                utils.CommaJoin(set(res_max + res_min))))
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

  def _ConvertPlainToDrbd(self, feedback_fn):
    """Converts an instance from plain to drbd.

    """
    feedback_fn("Converting template to drbd")
    pnode_uuid = self.instance.primary_node
    snode_uuid = self.op.remote_node_uuid

    assert self.instance.disk_template == constants.DT_PLAIN

    # create a fake disk info for _GenerateDiskTemplate
    disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
                  constants.IDISK_VG: d.logical_id[0],
                  constants.IDISK_NAME: d.name}
                 for d in self.instance.disks]
    new_disks = GenerateDiskTemplate(self, self.op.disk_template,
                                     self.instance.uuid, pnode_uuid,
                                     [snode_uuid], disk_info, None, None, 0,
                                     feedback_fn, self.diskparams)
    anno_disks = rpc.AnnotateDiskParams(new_disks, self.diskparams)
    p_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, pnode_uuid)
    s_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, snode_uuid)
    info = GetInstanceInfoText(self.instance)
    feedback_fn("Creating additional volumes...")
    # first, create the missing data and meta devices
    for disk in anno_disks:
      # unfortunately this is... not too nice
      CreateSingleBlockDev(self, pnode_uuid, self.instance, disk.children[1],
                           info, True, p_excl_stor)
      for child in disk.children:
        CreateSingleBlockDev(self, snode_uuid, self.instance, child, info, True,
                             s_excl_stor)
    # at this stage, all new LVs have been created, we can rename the
    # old ones
    feedback_fn("Renaming original volumes...")
    rename_list = [(o, n.children[0].logical_id)
                   for (o, n) in zip(self.instance.disks, new_disks)]
    result = self.rpc.call_blockdev_rename(pnode_uuid, rename_list)
    result.Raise("Failed to rename original LVs")

    feedback_fn("Initializing DRBD devices...")
    # all child devices are in place, we can now create the DRBD devices
    try:
      for disk in anno_disks:
        for (node_uuid, excl_stor) in [(pnode_uuid, p_excl_stor),
                                       (snode_uuid, s_excl_stor)]:
          f_create = node_uuid == pnode_uuid
          CreateSingleBlockDev(self, node_uuid, self.instance, disk, info,
                               f_create, excl_stor)
    except errors.GenericError, e:
      feedback_fn("Initializing of DRBD devices failed;"
                  " renaming back original volumes...")
      rename_back_list = [(n.children[0], o.logical_id)
                          for (n, o) in zip(new_disks, self.instance.disks)]
      result = self.rpc.call_blockdev_rename(pnode_uuid, rename_back_list)
      result.Raise("Failed to rename LVs back after error %s" % str(e))
      raise

    # at this point, the instance has been modified
    self.instance.disk_template = constants.DT_DRBD8
    self.instance.disks = new_disks
    self.cfg.Update(self.instance, feedback_fn)

    # Release node locks while waiting for sync
    ReleaseLocks(self, locking.LEVEL_NODE)

    # disks are created, waiting for sync
    disk_abort = not WaitForSync(self, self.instance,
                                 oneshot=not self.op.wait_for_sync)
    if disk_abort:
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance, please cleanup manually")

    # Node resource locks will be released by caller

  def _ConvertDrbdToPlain(self, feedback_fn):
    """Converts an instance from drbd to plain.

    """
    assert len(self.instance.secondary_nodes) == 1
    assert self.instance.disk_template == constants.DT_DRBD8

    pnode_uuid = self.instance.primary_node
    snode_uuid = self.instance.secondary_nodes[0]
    feedback_fn("Converting template to plain")

    old_disks = AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
    new_disks = [d.children[0] for d in self.instance.disks]

    # copy over size, mode and name
    for parent, child in zip(old_disks, new_disks):
      child.size = parent.size
      child.mode = parent.mode
      child.name = parent.name

    # this is a DRBD disk, return its port to the pool
    # NOTE: this must be done right before the call to cfg.Update!
    for disk in old_disks:
      tcp_port = disk.logical_id[2]
      self.cfg.AddTcpUdpPort(tcp_port)

    # update instance structure
    self.instance.disks = new_disks
    self.instance.disk_template = constants.DT_PLAIN
    _UpdateIvNames(0, self.instance.disks)
    self.cfg.Update(self.instance, feedback_fn)

    # Release locks in case removing disks takes a while
    ReleaseLocks(self, locking.LEVEL_NODE)

    feedback_fn("Removing volumes on the secondary node...")
    for disk in old_disks:
      result = self.rpc.call_blockdev_remove(snode_uuid, (disk, self.instance))
      result.Warn("Could not remove block device %s on node %s,"
                  " continuing anyway" %
                  (disk.iv_name, self.cfg.GetNodeName(snode_uuid)),
                  self.LogWarning)

    feedback_fn("Removing unneeded volumes on the primary node...")
    for idx, disk in enumerate(old_disks):
      meta = disk.children[1]
      result = self.rpc.call_blockdev_remove(pnode_uuid, (meta, self.instance))
      result.Warn("Could not remove metadata for disk %d on node %s,"
                  " continuing anyway" %
                  (idx, self.cfg.GetNodeName(pnode_uuid)),
                  self.LogWarning)

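  # Issues the hotplug RPC for a single device on the instance's primary node
  # and returns a short "hotplug:done"/"hotplug:failed" status string that is
  # appended to the change descriptions; a failed hotplug is logged as a
  # warning but does not abort the operation.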
  def _HotplugDevice(self, action, dev_type, device, extra, seq):
    self.LogInfo("Trying to hotplug device...")
    msg = "hotplug:"
    result = self.rpc.call_hotplug_device(self.instance.primary_node,
                                          self.instance, action, dev_type,
                                          (device, self.instance),
                                          extra, seq)
    if result.fail_msg:
      self.LogWarning("Could not hotplug device: %s" % result.fail_msg)
      self.LogInfo("Continuing execution..")
      msg += "failed"
    else:
      self.LogInfo("Hotplug done.")
      msg += "done"
    return msg

  def _CreateNewDisk(self, idx, params, _):
    """Creates a new disk.

    """
    # add a new disk
    if self.instance.disk_template in constants.DTS_FILEBASED:
      (file_driver, file_path) = self.instance.disks[0].logical_id
      file_path = os.path.dirname(file_path)
    else:
      file_driver = file_path = None

    disk = \
      GenerateDiskTemplate(self, self.instance.disk_template,
                           self.instance.uuid, self.instance.primary_node,
                           self.instance.secondary_nodes, [params], file_path,
                           file_driver, idx, self.Log, self.diskparams)[0]

    new_disks = CreateDisks(self, self.instance, disks=[disk])

    if self.cluster.prealloc_wipe_disks:
      # Wipe new disk
      WipeOrCleanupDisks(self, self.instance,
                         disks=[(idx, disk, 0)],
                         cleanup=new_disks)

    changes = [
      ("disk/%d" % idx,
       "add:size=%s,mode=%s" % (disk.size, disk.mode)),
      ]
    if self.op.hotplug:
      result = self.rpc.call_blockdev_assemble(self.instance.primary_node,
                                               (disk, self.instance),
                                               self.instance.name, True, idx)
      if result.fail_msg:
        changes.append(("disk/%d" % idx, "assemble:failed"))
        self.LogWarning("Can't assemble newly created disk %d: %s",
                        idx, result.fail_msg)
      else:
        _, link_name = result.payload
        msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
                                  constants.HOTPLUG_TARGET_DISK,
                                  disk, link_name, idx)
        changes.append(("disk/%d" % idx, msg))

    return (disk, changes)

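  # Runs after a new disk has been created: waits for it to sync (only a
  # single check when --no-wait-for-sync is given) and fails if the sync did
  # not complete.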
  def _PostAddDisk(self, _, disk):
    if not WaitForSync(self, self.instance, disks=[disk],
                       oneshot=not self.op.wait_for_sync):
      raise errors.OpExecError("Failed to sync disks of %s" %
                               self.instance.name)

    # the disk is active at this point, so deactivate it if the instance disks
    # are supposed to be inactive
    if not self.instance.disks_active:
      ShutdownInstanceDisks(self, self.instance, disks=[disk])

  @staticmethod
  def _ModifyDisk(idx, disk, params, _):
    """Modifies a disk.

    """
    changes = []
    mode = params.get(constants.IDISK_MODE, None)
    if mode:
      disk.mode = mode
      changes.append(("disk.mode/%d" % idx, disk.mode))

    name = params.get(constants.IDISK_NAME, None)
    disk.name = name
    changes.append(("disk.name/%d" % idx, disk.name))

    return changes

  def _RemoveDisk(self, idx, root, _):
    """Removes a disk.

    """
    hotmsg = ""
    if self.op.hotplug:
      hotmsg = self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
                                   constants.HOTPLUG_TARGET_DISK,
                                   root, None, idx)
      ShutdownInstanceDisks(self, self.instance, [root])

    (anno_disk,) = AnnotateDiskParams(self.instance, [root], self.cfg)
    for node_uuid, disk in anno_disk.ComputeNodeTree(
                             self.instance.primary_node):
      msg = self.rpc.call_blockdev_remove(node_uuid, (disk, self.instance)) \
              .fail_msg
      if msg:
        self.LogWarning("Could not remove disk/%d on node '%s': %s,"
                        " continuing anyway", idx,
                        self.cfg.GetNodeName(node_uuid), msg)

    # if this is a DRBD disk, return its port to the pool
    if root.dev_type in constants.DTS_DRBD:
      self.cfg.AddTcpUdpPort(root.logical_id[2])

    return hotmsg

  def _CreateNewNic(self, idx, params, private):
    """Creates data structure for a new network interface.

    """
    mac = params[constants.INIC_MAC]
    ip = params.get(constants.INIC_IP, None)
    net = params.get(constants.INIC_NETWORK, None)
    name = params.get(constants.INIC_NAME, None)
    net_uuid = self.cfg.LookupNetwork(net)
    #TODO: not private.filled?? can a nic have no nicparams??
    nicparams = private.filled
    nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name,
                       nicparams=nicparams)
    nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())

    changes = [
      ("nic.%d" % idx,
       "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
       (mac, ip, private.filled[constants.NIC_MODE],
       private.filled[constants.NIC_LINK], net)),
      ]

    if self.op.hotplug:
      msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
                                constants.HOTPLUG_TARGET_NIC,
                                nobj, None, idx)
      changes.append(("nic.%d" % idx, msg))

    return (nobj, changes)

  def _ApplyNicMods(self, idx, nic, params, private):
    """Modifies a network interface.

    """
    changes = []

    for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:
      if key in params:
        changes.append(("nic.%s/%d" % (key, idx), params[key]))
        setattr(nic, key, params[key])

    new_net = params.get(constants.INIC_NETWORK, nic.network)
    new_net_uuid = self.cfg.LookupNetwork(new_net)
    if new_net_uuid != nic.network:
      changes.append(("nic.network/%d" % idx, new_net))
      nic.network = new_net_uuid

    if private.filled:
      nic.nicparams = private.filled

      for (key, val) in nic.nicparams.items():
        changes.append(("nic.%s/%d" % (key, idx), val))

    if self.op.hotplug:
      msg = self._HotplugDevice(constants.HOTPLUG_ACTION_MODIFY,
                                constants.HOTPLUG_TARGET_NIC,
                                nic, None, idx)
      changes.append(("nic/%d" % idx, msg))

    return changes

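  # Hot-unplugs the NIC when hotplug was requested; removal of the NIC from
  # the instance configuration itself is handled by the generic container
  # modification machinery (_ApplyContainerMods).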
  def _RemoveNic(self, idx, nic, _):
    if self.op.hotplug:
      return self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
                                 constants.HOTPLUG_TARGET_NIC,
                                 nic, None, idx)

  def Exec(self, feedback_fn):
    """Modifies an instance.

    All parameters take effect only at the next restart of the instance,
    except for runtime memory ballooning and hotplugged devices, which are
    applied immediately.

    """
    # Process here the warnings from CheckPrereq, as we don't have a
    # feedback_fn there.
    # TODO: Replace with self.LogWarning
    for warn in self.warn:
      feedback_fn("WARNING: %s" % warn)

    assert ((self.op.disk_template is None) ^
            bool(self.owned_locks(locking.LEVEL_NODE_RES))), \
      "Not owning any node resource locks"

    result = []

    # New primary node
    if self.op.pnode_uuid:
      self.instance.primary_node = self.op.pnode_uuid

    # runtime memory
    if self.op.runtime_mem:
      rpcres = self.rpc.call_instance_balloon_memory(self.instance.primary_node,
                                                     self.instance,
                                                     self.op.runtime_mem)
      rpcres.Raise("Cannot modify instance runtime memory")
      result.append(("runtime_memory", self.op.runtime_mem))

    # Apply disk changes
    _ApplyContainerMods("disk", self.instance.disks, result, self.diskmod,
                        self._CreateNewDisk, self._ModifyDisk,
                        self._RemoveDisk, post_add_fn=self._PostAddDisk)
    _UpdateIvNames(0, self.instance.disks)

    if self.op.disk_template:
      if __debug__:
        check_nodes = set(self.instance.all_nodes)
        if self.op.remote_node_uuid:
          check_nodes.add(self.op.remote_node_uuid)
        for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
          owned = self.owned_locks(level)
          assert not (check_nodes - owned), \
            ("Not owning the correct locks, owning %r, expected at least %r" %
             (owned, check_nodes))

      r_shut = ShutdownInstanceDisks(self, self.instance)
      if not r_shut:
        raise errors.OpExecError("Cannot shutdown instance disks, unable to"
                                 " proceed with disk template conversion")
      mode = (self.instance.disk_template, self.op.disk_template)
      try:
        self._DISK_CONVERSIONS[mode](self, feedback_fn)
      except:
        self.cfg.ReleaseDRBDMinors(self.instance.uuid)
        raise
      result.append(("disk_template", self.op.disk_template))

      assert self.instance.disk_template == self.op.disk_template, \
        ("Expected disk template '%s', found '%s'" %
         (self.op.disk_template, self.instance.disk_template))

    # Release node and resource locks if there are any (they might already have
    # been released during disk conversion)
    ReleaseLocks(self, locking.LEVEL_NODE)
    ReleaseLocks(self, locking.LEVEL_NODE_RES)

    # Apply NIC changes
    if self._new_nics is not None:
      self.instance.nics = self._new_nics
      result.extend(self._nic_chgdesc)

    # hvparams changes
    if self.op.hvparams:
      self.instance.hvparams = self.hv_inst
      for key, val in self.op.hvparams.iteritems():
        result.append(("hv/%s" % key, val))

    # beparams changes
    if self.op.beparams:
      self.instance.beparams = self.be_inst
      for key, val in self.op.beparams.iteritems():
        result.append(("be/%s" % key, val))

    # OS change
    if self.op.os_name:
      self.instance.os = self.op.os_name

    # osparams changes
    if self.op.osparams:
      self.instance.osparams = self.os_inst
      for key, val in self.op.osparams.iteritems():
        result.append(("os/%s" % key, val))

    if self.op.offline is None:
      # Ignore
      pass
    elif self.op.offline:
      # Mark instance as offline
      self.cfg.MarkInstanceOffline(self.instance.uuid)
      result.append(("admin_state", constants.ADMINST_OFFLINE))
    else:
      # Mark instance as online, but stopped
      self.cfg.MarkInstanceDown(self.instance.uuid)
      result.append(("admin_state", constants.ADMINST_DOWN))

    self.cfg.Update(self.instance, feedback_fn, self.proc.GetECId())

    assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
                self.owned_locks(locking.LEVEL_NODE)), \
      "All node locks should have been released by now"

    return result

  _DISK_CONVERSIONS = {
    (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
    (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
    }


class LUInstanceChangeGroup(LogicalUnit):
  HPATH = "instance-change-group"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self.share_locks = ShareAll()

    self.needed_locks = {
      locking.LEVEL_NODEGROUP: [],
      locking.LEVEL_NODE: [],
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
      }

    self._ExpandAndLockInstance()

    if self.op.target_groups:
      self.req_target_uuids = map(self.cfg.LookupNodeGroup,
                                  self.op.target_groups)
    else:
      self.req_target_uuids = None

    self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]

      if self.req_target_uuids:
        lock_groups = set(self.req_target_uuids)

        # Lock all groups used by instance optimistically; this requires going
        # via the node before it's locked, requiring verification later on
        instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
        lock_groups.update(instance_groups)
      else:
        # No target groups, need to lock all of them
        lock_groups = locking.ALL_SET

      self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups

    elif level == locking.LEVEL_NODE:
      if self.req_target_uuids:
        # Lock all nodes used by instances
        self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
        self._LockInstancesNodes()

        # Lock all nodes in all potential target groups
        lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
                       self.cfg.GetInstanceNodeGroups(self.op.instance_uuid))
        member_nodes = [node_uuid
                        for group in lock_groups
                        for node_uuid in self.cfg.GetNodeGroup(group).members]
        self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
      else:
        # Lock all nodes as all groups are potential targets
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    owned_instance_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))

    assert (self.req_target_uuids is None or
            owned_groups.issuperset(self.req_target_uuids))
    assert owned_instance_names == set([self.op.instance_name])

    # Get instance information
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)

    # Check if node groups for locked instance are still correct
    assert owned_nodes.issuperset(self.instance.all_nodes), \
      ("Instance %s's nodes changed while we kept the lock" %
       self.op.instance_name)

    inst_groups = CheckInstanceNodeGroups(self.cfg, self.op.instance_uuid,
                                          owned_groups)

    if self.req_target_uuids:
      # User requested specific target groups
      self.target_uuids = frozenset(self.req_target_uuids)
    else:
      # All groups except those used by the instance are potential targets
      self.target_uuids = owned_groups - inst_groups

    conflicting_groups = self.target_uuids & inst_groups
    if conflicting_groups:
      raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
                                 " used by the instance '%s'" %
                                 (utils.CommaJoin(conflicting_groups),
                                  self.op.instance_name),
                                 errors.ECODE_INVAL)

    if not self.target_uuids:
      raise errors.OpPrereqError("There are no possible target groups",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    assert self.target_uuids

    env = {
      "TARGET_GROUPS": " ".join(self.target_uuids),
      }

    env.update(BuildInstanceHookEnvByObject(self, self.instance))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()
    return ([mn], [mn])

  def Exec(self, feedback_fn):
    instances = list(self.owned_locks(locking.LEVEL_INSTANCE))

    assert instances == [self.op.instance_name], "Instance not locked"

    req = iallocator.IAReqGroupChange(instances=instances,
                                      target_groups=list(self.target_uuids))
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute solution for changing group of"
                                 " instance '%s' using iallocator '%s': %s" %
                                 (self.op.instance_name, self.op.iallocator,
                                  ial.info), errors.ECODE_NORES)

    jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False)

    self.LogInfo("Iallocator returned %s job(s) for changing group of"
                 " instance '%s'", len(jobs), self.op.instance_name)

    return ResultWithJobs(jobs)