#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Logical units dealing with instances."""

import OpenSSL
import copy
import logging
import os

from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import ht
from ganeti import hypervisor
from ganeti import locking
from ganeti.masterd import iallocator
from ganeti import masterd
from ganeti import netutils
from ganeti import objects
from ganeti import pathutils
from ganeti import rpc
from ganeti import utils

from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs

from ganeti.cmdlib.common import INSTANCE_DOWN, \
  INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, CheckNodeOnline, \
  ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
  LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
  IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
  AnnotateDiskParams, GetUpdatedParams, ExpandInstanceUuidAndName, \
  ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeUuidAndName, \
  CheckDiskTemplateEnabled
from ganeti.cmdlib.instance_storage import CreateDisks, \
  CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, WaitForSync, \
  IsExclusiveStorageEnabledNodeUuid, CreateSingleBlockDev, ComputeDisks, \
  CheckRADOSFreeSpace, ComputeDiskSizePerVG, GenerateDiskTemplate, \
  StartInstanceDisks, ShutdownInstanceDisks, AssembleInstanceDisks, \
  CheckSpindlesExclusiveStorage
from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
  GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
  NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
  ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
  GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
  CheckInstanceBridgesExist, CheckNicsBridgesExist, CheckNodeHasOS

import ganeti.masterd.instance


#: Type description for changes as returned by L{_ApplyContainerMods}'s
#: callbacks
_TApplyContModsCbChanges = \
  ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
    ht.TNonEmptyString,
    ht.TAny,
    ])))
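
# Illustrative example (hypothetical values, not from the original code): a
# callback value accepted by _TApplyContModsCbChanges is either None or a
# list of (description, new-value) pairs, e.g.
#   [("mode", "bridged"), ("link", "br0")]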
76

    
77

    
78
def _CheckHostnameSane(lu, name):
79
  """Ensures that a given hostname resolves to a 'sane' name.
80

81
  The given name is required to be a prefix of the resolved hostname,
82
  to prevent accidental mismatches.
83

84
  @param lu: the logical unit on behalf of which we're checking
85
  @param name: the name we should resolve and check
86
  @return: the resolved hostname object
87

88
  """
89
  hostname = netutils.GetHostname(name=name)
90
  if hostname.name != name:
91
    lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name)
92
  if not utils.MatchNameComponent(name, [hostname.name]):
93
    raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
94
                                " same as given hostname '%s'") %
95
                               (hostname.name, name), errors.ECODE_INVAL)
96
  return hostname
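
# Illustrative behaviour (hypothetical names): if "web1" resolves to
# "web1.example.com", _CheckHostnameSane returns that Hostname object; if it
# resolves to something that does not start with the given name (e.g.
# "mail.example.com"), an OpPrereqError is raised instead.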
97

    
98

    
99
def _CheckOpportunisticLocking(op):
100
  """Generate error if opportunistic locking is not possible.
101

102
  """
103
  if op.opportunistic_locking and not op.iallocator:
104
    raise errors.OpPrereqError("Opportunistic locking is only available in"
105
                               " combination with an instance allocator",
106
                               errors.ECODE_INVAL)
107

    
108

    
109
def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_name_whitelist):
110
  """Wrapper around IAReqInstanceAlloc.
111

112
  @param op: The instance opcode
113
  @param disks: The computed disks
114
  @param nics: The computed nics
115
  @param beparams: The fully filled beparams
116
  @param node_name_whitelist: List of nodes which should appear as online to the
117
    allocator (unless the node is already marked offline)
118

119
  @returns: A filled L{iallocator.IAReqInstanceAlloc}
120

121
  """
122
  spindle_use = beparams[constants.BE_SPINDLE_USE]
123
  return iallocator.IAReqInstanceAlloc(name=op.instance_name,
124
                                       disk_template=op.disk_template,
125
                                       tags=op.tags,
126
                                       os=op.os_type,
127
                                       vcpus=beparams[constants.BE_VCPUS],
128
                                       memory=beparams[constants.BE_MAXMEM],
129
                                       spindle_use=spindle_use,
130
                                       disks=disks,
131
                                       nics=[n.ToDict() for n in nics],
132
                                       hypervisor=op.hypervisor,
133
                                       node_whitelist=node_name_whitelist)
134

    
135

    
136
def _ComputeFullBeParams(op, cluster):
137
  """Computes the full beparams.
138

139
  @param op: The instance opcode
140
  @param cluster: The cluster config object
141

142
  @return: The fully filled beparams
143

144
  """
145
  default_beparams = cluster.beparams[constants.PP_DEFAULT]
146
  for param, value in op.beparams.iteritems():
147
    if value == constants.VALUE_AUTO:
148
      op.beparams[param] = default_beparams[param]
149
  objects.UpgradeBeParams(op.beparams)
150
  utils.ForceDictType(op.beparams, constants.BES_PARAMETER_TYPES)
151
  return cluster.SimpleFillBE(op.beparams)
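
# Illustrative example (hypothetical values): with op.beparams set to
# {"vcpus": "auto", "maxmem": 512} and a cluster default of vcpus=1, the
# "auto" entry is replaced by 1 before the dict is upgraded, type-checked
# and filled up with the remaining cluster defaults via SimpleFillBE.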
152

    
153

    
154
def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
155
  """Computes the nics.
156

157
  @param op: The instance opcode
158
  @param cluster: Cluster configuration object
159
  @param default_ip: The default ip to assign
160
  @param cfg: An instance of the configuration object
161
  @param ec_id: Execution context ID
162

163
  @returns: The built up NICs
164

165
  """
166
  nics = []
167
  for nic in op.nics:
168
    nic_mode_req = nic.get(constants.INIC_MODE, None)
169
    nic_mode = nic_mode_req
170
    if nic_mode is None or nic_mode == constants.VALUE_AUTO:
171
      nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]
172

    
173
    net = nic.get(constants.INIC_NETWORK, None)
174
    link = nic.get(constants.NIC_LINK, None)
175
    ip = nic.get(constants.INIC_IP, None)
176
    vlan = nic.get(constants.INIC_VLAN, None)
177

    
178
    if net is None or net.lower() == constants.VALUE_NONE:
179
      net = None
180
    else:
181
      if nic_mode_req is not None or link is not None:
182
        raise errors.OpPrereqError("If network is given, no mode or link"
183
                                   " is allowed to be passed",
184
                                   errors.ECODE_INVAL)
185

    
186
    if vlan is not None and nic_mode != constants.NIC_MODE_OVS:
187
      raise errors.OpPrereqError("VLAN is given, but network mode is not"
188
                                 " openvswitch", errors.ECODE_INVAL)
189

    
190
    # ip validity checks
191
    if ip is None or ip.lower() == constants.VALUE_NONE:
192
      nic_ip = None
193
    elif ip.lower() == constants.VALUE_AUTO:
194
      if not op.name_check:
195
        raise errors.OpPrereqError("IP address set to auto but name checks"
196
                                   " have been skipped",
197
                                   errors.ECODE_INVAL)
198
      nic_ip = default_ip
199
    else:
200
      # We defer pool operations until later, so that the iallocator has
      # filled in the instance's node(s)
202
      if ip.lower() == constants.NIC_IP_POOL:
203
        if net is None:
204
          raise errors.OpPrereqError("if ip=pool, parameter network"
205
                                     " must be passed too",
206
                                     errors.ECODE_INVAL)
207

    
208
      elif not netutils.IPAddress.IsValid(ip):
209
        raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
210
                                   errors.ECODE_INVAL)
211

    
212
      nic_ip = ip
213

    
214
    # TODO: check the ip address for uniqueness
215
    if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
216
      raise errors.OpPrereqError("Routed nic mode requires an ip address",
217
                                 errors.ECODE_INVAL)
218

    
219
    # MAC address verification
220
    mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
221
    if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
222
      mac = utils.NormalizeAndValidateMac(mac)
223

    
224
      try:
225
        # TODO: We need to factor this out
226
        cfg.ReserveMAC(mac, ec_id)
227
      except errors.ReservationError:
228
        raise errors.OpPrereqError("MAC address %s already in use"
229
                                   " in cluster" % mac,
230
                                   errors.ECODE_NOTUNIQUE)
231

    
232
    #  Build nic parameters
233
    nicparams = {}
234
    if nic_mode_req:
235
      nicparams[constants.NIC_MODE] = nic_mode
236
    if link:
237
      nicparams[constants.NIC_LINK] = link
238
    if vlan:
239
      nicparams[constants.NIC_VLAN] = vlan
240

    
241
    check_params = cluster.SimpleFillNIC(nicparams)
242
    objects.NIC.CheckParameterSyntax(check_params)
243
    net_uuid = cfg.LookupNetwork(net)
244
    name = nic.get(constants.INIC_NAME, None)
245
    if name is not None and name.lower() == constants.VALUE_NONE:
246
      name = None
247
    nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name,
248
                          network=net_uuid, nicparams=nicparams)
249
    nic_obj.uuid = cfg.GenerateUniqueID(ec_id)
250
    nics.append(nic_obj)
251

    
252
  return nics
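
# Illustrative example (hypothetical values): a NIC spec such as
# {"ip": "pool", "network": "net1"} keeps ip="pool" at this point (the real
# address is only drawn from the network's pool in CheckPrereq, once the
# primary node is known) and its MAC, defaulting to "auto", is likewise
# generated later.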
253

    
254

    
255
def _CheckForConflictingIp(lu, ip, node_uuid):
256
  """In case of conflicting IP address raise error.
257

258
  @type ip: string
259
  @param ip: IP address
260
  @type node_uuid: string
261
  @param node_uuid: node UUID
262

263
  """
264
  (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node_uuid)
265
  if conf_net is not None:
266
    raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
267
                                " network %s, but the target NIC does not." %
268
                                (ip, conf_net)),
269
                               errors.ECODE_STATE)
270

    
271
  return (None, None)
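
# Illustrative example (hypothetical values): if 192.0.2.10 belongs to a
# network that is connected to the node's nodegroup but the target NIC is
# not on that network, the check above raises an OpPrereqError instead of
# silently creating a conflicting address.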
272

    
273

    
274
def _ComputeIPolicyInstanceSpecViolation(
275
  ipolicy, instance_spec, disk_template,
276
  _compute_fn=ComputeIPolicySpecViolation):
277
  """Compute if instance specs meets the specs of ipolicy.
278

279
  @type ipolicy: dict
280
  @param ipolicy: The ipolicy to verify against
281
  @type instance_spec: dict
282
  @param instance_spec: The instance spec to verify
283
  @type disk_template: string
284
  @param disk_template: the disk template of the instance
285
  @param _compute_fn: The function to verify ipolicy (unittest only)
286
  @see: L{ComputeIPolicySpecViolation}
287

288
  """
289
  mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
290
  cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
291
  disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
292
  disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
293
  nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
294
  spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)
295

    
296
  return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
297
                     disk_sizes, spindle_use, disk_template)
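
# Illustrative example (hypothetical values): an instance_spec of
#   {constants.ISPEC_MEM_SIZE: 2048, constants.ISPEC_CPU_COUNT: 2,
#    constants.ISPEC_DISK_COUNT: 1, constants.ISPEC_DISK_SIZE: [10240],
#    constants.ISPEC_NIC_COUNT: 1, constants.ISPEC_SPINDLE_USE: 1}
# is unpacked into the individual arguments of ComputeIPolicySpecViolation;
# missing keys fall back to None (or to 0/[] for the counts and sizes).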
298

    
299

    
300
def _CheckOSVariant(os_obj, name):
301
  """Check whether an OS name conforms to the os variants specification.
302

303
  @type os_obj: L{objects.OS}
304
  @param os_obj: OS object to check
305
  @type name: string
306
  @param name: OS name passed by the user, to check for validity
307

308
  """
309
  variant = objects.OS.GetVariant(name)
310
  if not os_obj.supported_variants:
311
    if variant:
312
      raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
313
                                 " passed)" % (os_obj.name, variant),
314
                                 errors.ECODE_INVAL)
315
    return
316
  if not variant:
317
    raise errors.OpPrereqError("OS name must include a variant",
318
                               errors.ECODE_INVAL)
319

    
320
  if variant not in os_obj.supported_variants:
321
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)
322

    
323

    
324
class LUInstanceCreate(LogicalUnit):
325
  """Create an instance.
326

327
  """
328
  HPATH = "instance-add"
329
  HTYPE = constants.HTYPE_INSTANCE
330
  REQ_BGL = False
331

    
332
  def _CheckDiskTemplateValid(self):
333
    """Checks validity of disk template.
334

335
    """
336
    cluster = self.cfg.GetClusterInfo()
337
    if self.op.disk_template is None:
338
      # FIXME: It would be better to take the default disk template from the
339
      # ipolicy, but for the ipolicy we need the primary node, which we get from
340
      # the iallocator, which wants the disk template as input. To solve this
341
      # chicken-and-egg problem, it should be possible to specify just a node
342
      # group from the iallocator and take the ipolicy from that.
343
      self.op.disk_template = cluster.enabled_disk_templates[0]
344
    CheckDiskTemplateEnabled(cluster, self.op.disk_template)
345

    
346
  def _CheckDiskArguments(self):
347
    """Checks validity of disk-related arguments.
348

349
    """
350
    # check that disk's names are unique and valid
351
    utils.ValidateDeviceNames("disk", self.op.disks)
352

    
353
    self._CheckDiskTemplateValid()
354

    
355
    # check disks' parameter names and consistent adopt/no-adopt strategy
356
    has_adopt = has_no_adopt = False
357
    for disk in self.op.disks:
358
      if self.op.disk_template != constants.DT_EXT:
359
        utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
360
      if constants.IDISK_ADOPT in disk:
361
        has_adopt = True
362
      else:
363
        has_no_adopt = True
364
    if has_adopt and has_no_adopt:
365
      raise errors.OpPrereqError("Either all disks are adopted or none is",
366
                                 errors.ECODE_INVAL)
367
    if has_adopt:
368
      if self.op.disk_template not in constants.DTS_MAY_ADOPT:
369
        raise errors.OpPrereqError("Disk adoption is not supported for the"
370
                                   " '%s' disk template" %
371
                                   self.op.disk_template,
372
                                   errors.ECODE_INVAL)
373
      if self.op.iallocator is not None:
374
        raise errors.OpPrereqError("Disk adoption not allowed with an"
375
                                   " iallocator script", errors.ECODE_INVAL)
376
      if self.op.mode == constants.INSTANCE_IMPORT:
377
        raise errors.OpPrereqError("Disk adoption not allowed for"
378
                                   " instance import", errors.ECODE_INVAL)
379
    else:
380
      if self.op.disk_template in constants.DTS_MUST_ADOPT:
381
        raise errors.OpPrereqError("Disk template %s requires disk adoption,"
382
                                   " but no 'adopt' parameter given" %
383
                                   self.op.disk_template,
384
                                   errors.ECODE_INVAL)
385

    
386
    self.adopt_disks = has_adopt
387

    
388
  def _CheckVLANArguments(self):
389
    """ Check validity of VLANs if given
390

391
    """
392
    for nic in self.op.nics:
393
      vlan = nic.get(constants.INIC_VLAN, None)
394
      if vlan:
395
        if vlan[0] == ".":
396
          # vlan starting with dot means single untagged vlan,
397
          # might be followed by trunk (:)
398
          if not vlan[1:].isdigit():
399
            vlanlist = vlan[1:].split(':')
400
            for vl in vlanlist:
401
              if not vl.isdigit():
402
                raise errors.OpPrereqError("Specified VLAN parameter is "
403
                                           "invalid : %s" % vlan,
404
                                             errors.ECODE_INVAL)
405
        elif vlan[0] == ":":
406
          # Trunk - tagged only
407
          vlanlist = vlan[1:].split(':')
408
          for vl in vlanlist:
409
            if not vl.isdigit():
410
              raise errors.OpPrereqError("Specified VLAN parameter is invalid"
411
                                           " : %s" % vlan, errors.ECODE_INVAL)
412
        elif vlan.isdigit():
413
          # This is the simplest case. No dots, only single digit
414
          # -> Create untagged access port, dot needs to be added
415
          nic[constants.INIC_VLAN] = "." + vlan
416
        else:
417
          raise errors.OpPrereqError("Specified VLAN parameter is invalid"
418
                                       " : %s" % vlan, errors.ECODE_INVAL)
419

    
420
  def CheckArguments(self):
421
    """Check arguments.
422

423
    """
424
    # do not require name_check to ease forward/backward compatibility
425
    # for tools
426
    if self.op.no_install and self.op.start:
427
      self.LogInfo("No-installation mode selected, disabling startup")
428
      self.op.start = False
429
    # validate/normalize the instance name
430
    self.op.instance_name = \
431
      netutils.Hostname.GetNormalizedName(self.op.instance_name)
432

    
433
    if self.op.ip_check and not self.op.name_check:
434
      # TODO: make the ip check more flexible and not depend on the name check
435
      raise errors.OpPrereqError("Cannot do IP address check without a name"
436
                                 " check", errors.ECODE_INVAL)
437

    
438
    # check nics' parameter names
439
    for nic in self.op.nics:
440
      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
441
    # check that NIC's parameters names are unique and valid
442
    utils.ValidateDeviceNames("NIC", self.op.nics)
443

    
444
    self._CheckVLANArguments()
445

    
446
    self._CheckDiskArguments()
447
    assert self.op.disk_template is not None
448

    
449
    # instance name verification
450
    if self.op.name_check:
451
      self.hostname = _CheckHostnameSane(self, self.op.instance_name)
452
      self.op.instance_name = self.hostname.name
453
      # used in CheckPrereq for ip ping check
454
      self.check_ip = self.hostname.ip
455
    else:
456
      self.check_ip = None
457

    
458
    # file storage checks
459
    if (self.op.file_driver and
460
        not self.op.file_driver in constants.FILE_DRIVER):
461
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
462
                                 self.op.file_driver, errors.ECODE_INVAL)
463

    
464
    # set default file_driver if unset and required
465
    if (not self.op.file_driver and
466
        self.op.disk_template in [constants.DT_FILE,
467
                                  constants.DT_SHARED_FILE]):
468
      self.op.file_driver = constants.FD_LOOP
469

    
470
    ### Node/iallocator related checks
471
    CheckIAllocatorOrNode(self, "iallocator", "pnode")
472

    
473
    if self.op.pnode is not None:
474
      if self.op.disk_template in constants.DTS_INT_MIRROR:
475
        if self.op.snode is None:
476
          raise errors.OpPrereqError("The networked disk templates need"
477
                                     " a mirror node", errors.ECODE_INVAL)
478
      elif self.op.snode:
479
        self.LogWarning("Secondary node will be ignored on non-mirrored disk"
480
                        " template")
481
        self.op.snode = None
482

    
483
    _CheckOpportunisticLocking(self.op)
484

    
485
    if self.op.mode == constants.INSTANCE_IMPORT:
486
      # On import force_variant must be True, because if we forced it at
487
      # initial install, our only chance when importing it back is that it
488
      # works again!
489
      self.op.force_variant = True
490

    
491
      if self.op.no_install:
492
        self.LogInfo("No-installation mode has no effect during import")
493

    
494
    elif self.op.mode == constants.INSTANCE_CREATE:
495
      if self.op.os_type is None:
496
        raise errors.OpPrereqError("No guest OS specified",
497
                                   errors.ECODE_INVAL)
498
      if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
499
        raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
500
                                   " installation" % self.op.os_type,
501
                                   errors.ECODE_STATE)
502
    elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
503
      self._cds = GetClusterDomainSecret()
504

    
505
      # Check handshake to ensure both clusters have the same domain secret
506
      src_handshake = self.op.source_handshake
507
      if not src_handshake:
508
        raise errors.OpPrereqError("Missing source handshake",
509
                                   errors.ECODE_INVAL)
510

    
511
      errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
512
                                                           src_handshake)
513
      if errmsg:
514
        raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
515
                                   errors.ECODE_INVAL)
516

    
517
      # Load and check source CA
518
      self.source_x509_ca_pem = self.op.source_x509_ca
519
      if not self.source_x509_ca_pem:
520
        raise errors.OpPrereqError("Missing source X509 CA",
521
                                   errors.ECODE_INVAL)
522

    
523
      try:
524
        (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
525
                                                    self._cds)
526
      except OpenSSL.crypto.Error, err:
527
        raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
528
                                   (err, ), errors.ECODE_INVAL)
529

    
530
      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
531
      if errcode is not None:
532
        raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
533
                                   errors.ECODE_INVAL)
534

    
535
      self.source_x509_ca = cert
536

    
537
      src_instance_name = self.op.source_instance_name
538
      if not src_instance_name:
539
        raise errors.OpPrereqError("Missing source instance name",
540
                                   errors.ECODE_INVAL)
541

    
542
      self.source_instance_name = \
543
        netutils.GetHostname(name=src_instance_name).name
544

    
545
    else:
546
      raise errors.OpPrereqError("Invalid instance creation mode %r" %
547
                                 self.op.mode, errors.ECODE_INVAL)
548

    
549
  def ExpandNames(self):
550
    """ExpandNames for CreateInstance.
551

552
    Figure out the right locks for instance creation.
553

554
    """
555
    self.needed_locks = {}
556

    
557
    # this is just a preventive check, but someone might still add this
558
    # instance in the meantime, and creation will fail at lock-add time
559
    if self.op.instance_name in\
560
      [inst.name for inst in self.cfg.GetAllInstancesInfo().values()]:
561
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
562
                                 self.op.instance_name, errors.ECODE_EXISTS)
563

    
564
    self.add_locks[locking.LEVEL_INSTANCE] = self.op.instance_name
565

    
566
    if self.op.iallocator:
567
      # TODO: Find a solution to not lock all nodes in the cluster, e.g. by
568
      # specifying a group on instance creation and then selecting nodes from
569
      # that group
570
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
571
      self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
572

    
573
      if self.op.opportunistic_locking:
574
        self.opportunistic_locks[locking.LEVEL_NODE] = True
575
        self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
576
    else:
577
      (self.op.pnode_uuid, self.op.pnode) = \
578
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
579
      nodelist = [self.op.pnode_uuid]
580
      if self.op.snode is not None:
581
        (self.op.snode_uuid, self.op.snode) = \
582
          ExpandNodeUuidAndName(self.cfg, self.op.snode_uuid, self.op.snode)
583
        nodelist.append(self.op.snode_uuid)
584
      self.needed_locks[locking.LEVEL_NODE] = nodelist
585

    
586
    # in case of import lock the source node too
587
    if self.op.mode == constants.INSTANCE_IMPORT:
588
      src_node = self.op.src_node
589
      src_path = self.op.src_path
590

    
591
      if src_path is None:
592
        self.op.src_path = src_path = self.op.instance_name
593

    
594
      if src_node is None:
595
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
596
        self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
597
        self.op.src_node = None
598
        if os.path.isabs(src_path):
599
          raise errors.OpPrereqError("Importing an instance from a path"
600
                                     " requires a source node option",
601
                                     errors.ECODE_INVAL)
602
      else:
603
        (self.op.src_node_uuid, self.op.src_node) = (_, src_node) = \
604
          ExpandNodeUuidAndName(self.cfg, self.op.src_node_uuid, src_node)
605
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
606
          self.needed_locks[locking.LEVEL_NODE].append(self.op.src_node_uuid)
607
        if not os.path.isabs(src_path):
608
          self.op.src_path = \
609
            utils.PathJoin(pathutils.EXPORT_DIR, src_path)
610

    
611
    self.needed_locks[locking.LEVEL_NODE_RES] = \
612
      CopyLockList(self.needed_locks[locking.LEVEL_NODE])
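
  # Illustrative locking outcome: with an iallocator all node (and
  # node-allocation) locks are requested, optionally opportunistically;
  # with an explicit pnode/snode only those nodes are locked (plus the
  # source node when importing). In both cases LEVEL_NODE_RES mirrors the
  # LEVEL_NODE set computed above.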
613

    
614
  def _RunAllocator(self):
615
    """Run the allocator based on input opcode.
616

617
    """
618
    if self.op.opportunistic_locking:
619
      # Only consider nodes for which a lock is held
620
      node_name_whitelist = self.cfg.GetNodeNames(
621
        self.owned_locks(locking.LEVEL_NODE))
622
    else:
623
      node_name_whitelist = None
624

    
625
    req = _CreateInstanceAllocRequest(self.op, self.disks,
626
                                      self.nics, self.be_full,
627
                                      node_name_whitelist)
628
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)
629

    
630
    ial.Run(self.op.iallocator)
631

    
632
    if not ial.success:
633
      # When opportunistic locks are used only a temporary failure is generated
634
      if self.op.opportunistic_locking:
635
        ecode = errors.ECODE_TEMP_NORES
636
      else:
637
        ecode = errors.ECODE_NORES
638

    
639
      raise errors.OpPrereqError("Can't compute nodes using"
640
                                 " iallocator '%s': %s" %
641
                                 (self.op.iallocator, ial.info),
642
                                 ecode)
643

    
644
    (self.op.pnode_uuid, self.op.pnode) = \
645
      ExpandNodeUuidAndName(self.cfg, None, ial.result[0])
646
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
647
                 self.op.instance_name, self.op.iallocator,
648
                 utils.CommaJoin(ial.result))
649

    
650
    assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator"
651

    
652
    if req.RequiredNodes() == 2:
653
      (self.op.snode_uuid, self.op.snode) = \
654
        ExpandNodeUuidAndName(self.cfg, None, ial.result[1])
655

    
656
  def BuildHooksEnv(self):
657
    """Build hooks env.
658

659
    This runs on master, primary and secondary nodes of the instance.
660

661
    """
662
    env = {
663
      "ADD_MODE": self.op.mode,
664
      }
665
    if self.op.mode == constants.INSTANCE_IMPORT:
666
      env["SRC_NODE"] = self.op.src_node
667
      env["SRC_PATH"] = self.op.src_path
668
      env["SRC_IMAGES"] = self.src_images
669

    
670
    env.update(BuildInstanceHookEnv(
671
      name=self.op.instance_name,
672
      primary_node_name=self.op.pnode,
673
      secondary_node_names=self.cfg.GetNodeNames(self.secondaries),
674
      status=self.op.start,
675
      os_type=self.op.os_type,
676
      minmem=self.be_full[constants.BE_MINMEM],
677
      maxmem=self.be_full[constants.BE_MAXMEM],
678
      vcpus=self.be_full[constants.BE_VCPUS],
679
      nics=NICListToTuple(self, self.nics),
680
      disk_template=self.op.disk_template,
681
      disks=[(d[constants.IDISK_NAME], d.get("uuid", ""),
682
              d[constants.IDISK_SIZE], d[constants.IDISK_MODE])
683
             for d in self.disks],
684
      bep=self.be_full,
685
      hvp=self.hv_full,
686
      hypervisor_name=self.op.hypervisor,
687
      tags=self.op.tags,
688
      ))
689

    
690
    return env
691

    
692
  def BuildHooksNodes(self):
693
    """Build hooks nodes.
694

695
    """
696
    nl = [self.cfg.GetMasterNode(), self.op.pnode_uuid] + self.secondaries
697
    return nl, nl
698

    
699
  def _ReadExportInfo(self):
700
    """Reads the export information from disk.
701

702
    It will override the opcode source node and path with the actual
703
    information, if these two were not specified before.
704

705
    @return: the export information
706

707
    """
708
    assert self.op.mode == constants.INSTANCE_IMPORT
709

    
710
    if self.op.src_node_uuid is None:
711
      locked_nodes = self.owned_locks(locking.LEVEL_NODE)
712
      exp_list = self.rpc.call_export_list(locked_nodes)
713
      found = False
714
      for node_uuid in exp_list:
715
        if exp_list[node_uuid].fail_msg:
716
          continue
717
        if self.op.src_path in exp_list[node_uuid].payload:
718
          found = True
719
          self.op.src_node = self.cfg.GetNodeInfo(node_uuid).name
720
          self.op.src_node_uuid = node_uuid
721
          self.op.src_path = utils.PathJoin(pathutils.EXPORT_DIR,
722
                                            self.op.src_path)
723
          break
724
      if not found:
725
        raise errors.OpPrereqError("No export found for relative path %s" %
726
                                   self.op.src_path, errors.ECODE_INVAL)
727

    
728
    CheckNodeOnline(self, self.op.src_node_uuid)
729
    result = self.rpc.call_export_info(self.op.src_node_uuid, self.op.src_path)
730
    result.Raise("No export or invalid export found in dir %s" %
731
                 self.op.src_path)
732

    
733
    export_info = objects.SerializableConfigParser.Loads(str(result.payload))
734
    if not export_info.has_section(constants.INISECT_EXP):
735
      raise errors.ProgrammerError("Corrupted export config",
736
                                   errors.ECODE_ENVIRON)
737

    
738
    ei_version = export_info.get(constants.INISECT_EXP, "version")
739
    if int(ei_version) != constants.EXPORT_VERSION:
740
      raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
741
                                 (ei_version, constants.EXPORT_VERSION),
742
                                 errors.ECODE_ENVIRON)
743
    return export_info
744

    
745
  def _ReadExportParams(self, einfo):
746
    """Use export parameters as defaults.
747

748
    In case the opcode doesn't specify (i.e. override) some instance
    parameters, try to take them from the export information, if it
    declares them.
751

752
    """
753
    self.op.os_type = einfo.get(constants.INISECT_EXP, "os")
754

    
755
    if not self.op.disks:
756
      disks = []
757
      # TODO: import the disk iv_name too
758
      for idx in range(constants.MAX_DISKS):
759
        if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
760
          disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
761
          disks.append({constants.IDISK_SIZE: disk_sz})
762
      self.op.disks = disks
763
      if not disks and self.op.disk_template != constants.DT_DISKLESS:
764
        raise errors.OpPrereqError("No disk info specified and the export"
765
                                   " is missing the disk information",
766
                                   errors.ECODE_INVAL)
767

    
768
    if not self.op.nics:
769
      nics = []
770
      for idx in range(constants.MAX_NICS):
771
        if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
772
          ndict = {}
773
          for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
774
            nic_param_name = "nic%d_%s" % (idx, name)
775
            if einfo.has_option(constants.INISECT_INS, nic_param_name):
776
              v = einfo.get(constants.INISECT_INS, nic_param_name)
777
              ndict[name] = v
778
          nics.append(ndict)
779
        else:
780
          break
781
      self.op.nics = nics
782

    
783
    if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
784
      self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()
785

    
786
    if (self.op.hypervisor is None and
787
        einfo.has_option(constants.INISECT_INS, "hypervisor")):
788
      self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")
789

    
790
    if einfo.has_section(constants.INISECT_HYP):
791
      # use the export parameters but do not override the ones
792
      # specified by the user
793
      for name, value in einfo.items(constants.INISECT_HYP):
794
        if name not in self.op.hvparams:
795
          self.op.hvparams[name] = value
796

    
797
    if einfo.has_section(constants.INISECT_BEP):
798
      # use the parameters, without overriding
799
      for name, value in einfo.items(constants.INISECT_BEP):
800
        if name not in self.op.beparams:
801
          self.op.beparams[name] = value
802
        # Compatibility for the old "memory" be param
803
        if name == constants.BE_MEMORY:
804
          if constants.BE_MAXMEM not in self.op.beparams:
805
            self.op.beparams[constants.BE_MAXMEM] = value
806
          if constants.BE_MINMEM not in self.op.beparams:
807
            self.op.beparams[constants.BE_MINMEM] = value
808
    else:
809
      # try to read the parameters old style, from the main section
810
      for name in constants.BES_PARAMETERS:
811
        if (name not in self.op.beparams and
812
            einfo.has_option(constants.INISECT_INS, name)):
813
          self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)
814

    
815
    if einfo.has_section(constants.INISECT_OSP):
816
      # use the parameters, without overriding
817
      for name, value in einfo.items(constants.INISECT_OSP):
818
        if name not in self.op.osparams:
819
          self.op.osparams[name] = value
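
  # Illustrative export snippet (shape only, hypothetical values) that the
  # defaults above would be read from:
  #   [instance]
  #   disk0_size = 10240
  #   nic0_mac = aa:00:00:11:22:33
  #   tags = web production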
820

    
821
  def _RevertToDefaults(self, cluster):
822
    """Revert the instance parameters to the default values.
823

824
    """
825
    # hvparams
826
    hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
827
    for name in self.op.hvparams.keys():
828
      if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
829
        del self.op.hvparams[name]
830
    # beparams
831
    be_defs = cluster.SimpleFillBE({})
832
    for name in self.op.beparams.keys():
833
      if name in be_defs and be_defs[name] == self.op.beparams[name]:
834
        del self.op.beparams[name]
835
    # nic params
836
    nic_defs = cluster.SimpleFillNIC({})
837
    for nic in self.op.nics:
838
      for name in constants.NICS_PARAMETERS:
839
        if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
840
          del nic[name]
841
    # osparams
842
    os_defs = cluster.SimpleFillOS(self.op.os_type, {})
843
    for name in self.op.osparams.keys():
844
      if name in os_defs and os_defs[name] == self.op.osparams[name]:
845
        del self.op.osparams[name]
846

    
847
  def _CalculateFileStorageDir(self):
848
    """Calculate final instance file storage dir.
849

850
    """
851
    # file storage dir calculation/check
852
    self.instance_file_storage_dir = None
853
    if self.op.disk_template in constants.DTS_FILEBASED:
854
      # build the full file storage dir path
855
      joinargs = []
856

    
857
      if self.op.disk_template == constants.DT_SHARED_FILE:
858
        get_fsd_fn = self.cfg.GetSharedFileStorageDir
859
      else:
860
        get_fsd_fn = self.cfg.GetFileStorageDir
861

    
862
      cfg_storagedir = get_fsd_fn()
863
      if not cfg_storagedir:
864
        raise errors.OpPrereqError("Cluster file storage dir not defined",
865
                                   errors.ECODE_STATE)
866
      joinargs.append(cfg_storagedir)
867

    
868
      if self.op.file_storage_dir is not None:
869
        joinargs.append(self.op.file_storage_dir)
870

    
871
      joinargs.append(self.op.instance_name)
872

    
873
      # pylint: disable=W0142
874
      self.instance_file_storage_dir = utils.PathJoin(*joinargs)
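
  # Illustrative result (hypothetical paths): with a cluster file storage
  # dir of "/srv/ganeti/file-storage", op.file_storage_dir "websrv" and
  # instance name "inst1.example.com", the final directory becomes
  # "/srv/ganeti/file-storage/websrv/inst1.example.com".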
875

    
876
  def CheckPrereq(self): # pylint: disable=R0914
877
    """Check prerequisites.
878

879
    """
880
    self._CalculateFileStorageDir()
881

    
882
    if self.op.mode == constants.INSTANCE_IMPORT:
883
      export_info = self._ReadExportInfo()
884
      self._ReadExportParams(export_info)
885
      self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
886
    else:
887
      self._old_instance_name = None
888

    
889
    if (not self.cfg.GetVGName() and
890
        self.op.disk_template not in constants.DTS_NOT_LVM):
891
      raise errors.OpPrereqError("Cluster does not support lvm-based"
892
                                 " instances", errors.ECODE_STATE)
893

    
894
    if (self.op.hypervisor is None or
895
        self.op.hypervisor == constants.VALUE_AUTO):
896
      self.op.hypervisor = self.cfg.GetHypervisorType()
897

    
898
    cluster = self.cfg.GetClusterInfo()
899
    enabled_hvs = cluster.enabled_hypervisors
900
    if self.op.hypervisor not in enabled_hvs:
901
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
902
                                 " cluster (%s)" %
903
                                 (self.op.hypervisor, ",".join(enabled_hvs)),
904
                                 errors.ECODE_STATE)
905

    
906
    # Check tag validity
907
    for tag in self.op.tags:
908
      objects.TaggableObject.ValidateTag(tag)
909

    
910
    # check hypervisor parameter syntax (locally)
911
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
912
    filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
913
                                      self.op.hvparams)
914
    hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
915
    hv_type.CheckParameterSyntax(filled_hvp)
916
    self.hv_full = filled_hvp
917
    # check that we don't specify global parameters on an instance
918
    CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
919
                         "instance", "cluster")
920

    
921
    # fill and remember the beparams dict
922
    self.be_full = _ComputeFullBeParams(self.op, cluster)
923

    
924
    # build os parameters
925
    self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)
926

    
927
    # now that hvp/bep are in final format, let's reset to defaults,
928
    # if told to do so
929
    if self.op.identify_defaults:
930
      self._RevertToDefaults(cluster)
931

    
932
    # NIC buildup
933
    self.nics = _ComputeNics(self.op, cluster, self.check_ip, self.cfg,
934
                             self.proc.GetECId())
935

    
936
    # disk checks/pre-build
937
    default_vg = self.cfg.GetVGName()
938
    self.disks = ComputeDisks(self.op, default_vg)
939

    
940
    if self.op.mode == constants.INSTANCE_IMPORT:
941
      disk_images = []
942
      for idx in range(len(self.disks)):
943
        option = "disk%d_dump" % idx
944
        if export_info.has_option(constants.INISECT_INS, option):
945
          # FIXME: are the old os-es, disk sizes, etc. useful?
946
          export_name = export_info.get(constants.INISECT_INS, option)
947
          image = utils.PathJoin(self.op.src_path, export_name)
948
          disk_images.append(image)
949
        else:
950
          disk_images.append(False)
951

    
952
      self.src_images = disk_images
953

    
954
      if self.op.instance_name == self._old_instance_name:
955
        for idx, nic in enumerate(self.nics):
956
          if nic.mac == constants.VALUE_AUTO:
957
            nic_mac_ini = "nic%d_mac" % idx
958
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)
959

    
960
    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT
961

    
962
    # ip ping checks (we use the same ip that was resolved in ExpandNames)
963
    if self.op.ip_check:
964
      if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
965
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
966
                                   (self.check_ip, self.op.instance_name),
967
                                   errors.ECODE_NOTUNIQUE)
968

    
969
    #### mac address generation
970
    # By generating here the mac address both the allocator and the hooks get
971
    # the real final mac address rather than the 'auto' or 'generate' value.
972
    # There is a race condition between the generation and the instance object
973
    # creation, which means that we know the mac is valid now, but we're not
974
    # sure it will be when we actually add the instance. If things go bad
975
    # adding the instance will abort because of a duplicate mac, and the
976
    # creation job will fail.
977
    for nic in self.nics:
978
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
979
        nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId())
980

    
981
    #### allocator run
982

    
983
    if self.op.iallocator is not None:
984
      self._RunAllocator()
985

    
986
    # Release all unneeded node locks
987
    keep_locks = filter(None, [self.op.pnode_uuid, self.op.snode_uuid,
988
                               self.op.src_node_uuid])
989
    ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
990
    ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
991
    ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)
992

    
993
    assert (self.owned_locks(locking.LEVEL_NODE) ==
994
            self.owned_locks(locking.LEVEL_NODE_RES)), \
995
      "Node locks differ from node resource locks"
996

    
997
    #### node related checks
998

    
999
    # check primary node
1000
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode_uuid)
1001
    assert self.pnode is not None, \
1002
      "Cannot retrieve locked node %s" % self.op.pnode_uuid
1003
    if pnode.offline:
1004
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
1005
                                 pnode.name, errors.ECODE_STATE)
1006
    if pnode.drained:
1007
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
1008
                                 pnode.name, errors.ECODE_STATE)
1009
    if not pnode.vm_capable:
1010
      raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
1011
                                 " '%s'" % pnode.name, errors.ECODE_STATE)
1012

    
1013
    self.secondaries = []
1014

    
1015
    # Fill in any IPs from IP pools. This must happen here, because we need to
1016
    # know the nic's primary node, as specified by the iallocator
1017
    for idx, nic in enumerate(self.nics):
1018
      net_uuid = nic.network
1019
      if net_uuid is not None:
1020
        nobj = self.cfg.GetNetwork(net_uuid)
1021
        netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.uuid)
1022
        if netparams is None:
1023
          raise errors.OpPrereqError("No netparams found for network"
1024
                                     " %s. Probably not connected to"
1025
                                     " node's %s nodegroup" %
1026
                                     (nobj.name, self.pnode.name),
1027
                                     errors.ECODE_INVAL)
1028
        self.LogInfo("NIC/%d inherits netparams %s" %
1029
                     (idx, netparams.values()))
1030
        nic.nicparams = dict(netparams)
1031
        if nic.ip is not None:
1032
          if nic.ip.lower() == constants.NIC_IP_POOL:
1033
            try:
1034
              nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId())
1035
            except errors.ReservationError:
1036
              raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
1037
                                         " from the address pool" % idx,
1038
                                         errors.ECODE_STATE)
1039
            self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name)
1040
          else:
1041
            try:
1042
              self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId())
1043
            except errors.ReservationError:
1044
              raise errors.OpPrereqError("IP address %s already in use"
1045
                                         " or does not belong to network %s" %
1046
                                         (nic.ip, nobj.name),
1047
                                         errors.ECODE_NOTUNIQUE)
1048

    
1049
      # net is None, ip None or given
1050
      elif self.op.conflicts_check:
1051
        _CheckForConflictingIp(self, nic.ip, self.pnode.uuid)
1052

    
1053
    # mirror node verification
1054
    if self.op.disk_template in constants.DTS_INT_MIRROR:
1055
      if self.op.snode_uuid == pnode.uuid:
1056
        raise errors.OpPrereqError("The secondary node cannot be the"
1057
                                   " primary node", errors.ECODE_INVAL)
1058
      CheckNodeOnline(self, self.op.snode_uuid)
1059
      CheckNodeNotDrained(self, self.op.snode_uuid)
1060
      CheckNodeVmCapable(self, self.op.snode_uuid)
1061
      self.secondaries.append(self.op.snode_uuid)
1062

    
1063
      snode = self.cfg.GetNodeInfo(self.op.snode_uuid)
1064
      if pnode.group != snode.group:
1065
        self.LogWarning("The primary and secondary nodes are in two"
1066
                        " different node groups; the disk parameters"
1067
                        " from the first disk's node group will be"
1068
                        " used")
1069

    
1070
    nodes = [pnode]
1071
    if self.op.disk_template in constants.DTS_INT_MIRROR:
1072
      nodes.append(snode)
1073
    has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
1074
    excl_stor = compat.any(map(has_es, nodes))
1075
    if excl_stor and not self.op.disk_template in constants.DTS_EXCL_STORAGE:
1076
      raise errors.OpPrereqError("Disk template %s not supported with"
1077
                                 " exclusive storage" % self.op.disk_template,
1078
                                 errors.ECODE_STATE)
1079
    for disk in self.disks:
1080
      CheckSpindlesExclusiveStorage(disk, excl_stor, True)
1081

    
1082
    node_uuids = [pnode.uuid] + self.secondaries
1083

    
1084
    if not self.adopt_disks:
1085
      if self.op.disk_template == constants.DT_RBD:
1086
        # _CheckRADOSFreeSpace() is just a placeholder.
1087
        # Any function that checks prerequisites can be placed here.
1088
        # Check if there is enough space on the RADOS cluster.
1089
        CheckRADOSFreeSpace()
1090
      elif self.op.disk_template == constants.DT_EXT:
1091
        # FIXME: Function that checks prereqs if needed
1092
        pass
1093
      elif self.op.disk_template in utils.GetLvmDiskTemplates():
1094
        # Check lv size requirements, if not adopting
1095
        req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
1096
        CheckNodesFreeDiskPerVG(self, node_uuids, req_sizes)
1097
      else:
1098
        # FIXME: add checks for other, non-adopting, non-lvm disk templates
1099
        pass
1100

    
1101
    elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
1102
      all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
1103
                                disk[constants.IDISK_ADOPT])
1104
                     for disk in self.disks])
1105
      if len(all_lvs) != len(self.disks):
1106
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
1107
                                   errors.ECODE_INVAL)
1108
      for lv_name in all_lvs:
1109
        try:
1110
          # FIXME: lv_name here is "vg/lv" need to ensure that other calls
1111
          # to ReserveLV uses the same syntax
1112
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
1113
        except errors.ReservationError:
1114
          raise errors.OpPrereqError("LV named %s used by another instance" %
1115
                                     lv_name, errors.ECODE_NOTUNIQUE)
1116

    
1117
      vg_names = self.rpc.call_vg_list([pnode.uuid])[pnode.uuid]
1118
      vg_names.Raise("Cannot get VG information from node %s" % pnode.name)
1119

    
1120
      node_lvs = self.rpc.call_lv_list([pnode.uuid],
1121
                                       vg_names.payload.keys())[pnode.uuid]
1122
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
1123
      node_lvs = node_lvs.payload
1124

    
1125
      delta = all_lvs.difference(node_lvs.keys())
1126
      if delta:
1127
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
1128
                                   utils.CommaJoin(delta),
1129
                                   errors.ECODE_INVAL)
1130
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
1131
      if online_lvs:
1132
        raise errors.OpPrereqError("Online logical volumes found, cannot"
1133
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
1134
                                   errors.ECODE_STATE)
1135
      # update the size of disk based on what is found
1136
      for dsk in self.disks:
1137
        dsk[constants.IDISK_SIZE] = \
1138
          int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
1139
                                        dsk[constants.IDISK_ADOPT])][0]))
1140

    
1141
    elif self.op.disk_template == constants.DT_BLOCK:
1142
      # Normalize and de-duplicate device paths
1143
      all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
1144
                       for disk in self.disks])
1145
      if len(all_disks) != len(self.disks):
1146
        raise errors.OpPrereqError("Duplicate disk names given for adoption",
1147
                                   errors.ECODE_INVAL)
1148
      baddisks = [d for d in all_disks
1149
                  if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
1150
      if baddisks:
1151
        raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
1152
                                   " cannot be adopted" %
1153
                                   (utils.CommaJoin(baddisks),
1154
                                    constants.ADOPTABLE_BLOCKDEV_ROOT),
1155
                                   errors.ECODE_INVAL)
1156

    
1157
      node_disks = self.rpc.call_bdev_sizes([pnode.uuid],
1158
                                            list(all_disks))[pnode.uuid]
1159
      node_disks.Raise("Cannot get block device information from node %s" %
1160
                       pnode.name)
1161
      node_disks = node_disks.payload
1162
      delta = all_disks.difference(node_disks.keys())
1163
      if delta:
1164
        raise errors.OpPrereqError("Missing block device(s): %s" %
1165
                                   utils.CommaJoin(delta),
1166
                                   errors.ECODE_INVAL)
1167
      for dsk in self.disks:
1168
        dsk[constants.IDISK_SIZE] = \
1169
          int(float(node_disks[dsk[constants.IDISK_ADOPT]]))
1170

    
1171
    # Verify instance specs
1172
    spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
1173
    ispec = {
1174
      constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
1175
      constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
1176
      constants.ISPEC_DISK_COUNT: len(self.disks),
1177
      constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE]
1178
                                  for disk in self.disks],
1179
      constants.ISPEC_NIC_COUNT: len(self.nics),
1180
      constants.ISPEC_SPINDLE_USE: spindle_use,
1181
      }
1182

    
1183
    group_info = self.cfg.GetNodeGroup(pnode.group)
1184
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
1185
    res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
1186
                                               self.op.disk_template)
1187
    if not self.op.ignore_ipolicy and res:
1188
      msg = ("Instance allocation to group %s (%s) violates policy: %s" %
1189
             (pnode.group, group_info.name, utils.CommaJoin(res)))
1190
      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
1191

    
1192
    CheckHVParams(self, node_uuids, self.op.hypervisor, self.op.hvparams)
1193

    
1194
    CheckNodeHasOS(self, pnode.uuid, self.op.os_type, self.op.force_variant)
1195
    # check OS parameters (remotely)
1196
    CheckOSParams(self, True, node_uuids, self.op.os_type, self.os_full)
1197

    
1198
    CheckNicsBridgesExist(self, self.nics, self.pnode.uuid)
1199

    
1200
    #TODO: _CheckExtParams (remotely)
1201
    # Check parameters for extstorage
1202

    
1203
    # memory check on primary node
1204
    #TODO(dynmem): use MINMEM for checking
1205
    if self.op.start:
1206
      hvfull = objects.FillDict(cluster.hvparams.get(self.op.hypervisor, {}),
1207
                                self.op.hvparams)
1208
      CheckNodeFreeMemory(self, self.pnode.uuid,
1209
                          "creating instance %s" % self.op.instance_name,
1210
                          self.be_full[constants.BE_MAXMEM],
1211
                          self.op.hypervisor, hvfull)
1212

    
1213
    self.dry_run_result = list(node_uuids)

  def Exec(self, feedback_fn):
    """Create and add the instance to the cluster.

    """
    assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
                self.owned_locks(locking.LEVEL_NODE)), \
      "Node locks differ from node resource locks"
    assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)

    ht_kind = self.op.hypervisor
    if ht_kind in constants.HTS_REQ_PORT:
      network_port = self.cfg.AllocatePort()
    else:
      network_port = None

    instance_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())

    # This is ugly but we got a chicken-egg problem here
    # We can only take the group disk parameters, as the instance
    # has no disks yet (we are generating them right here).
    nodegroup = self.cfg.GetNodeGroup(self.pnode.group)
    disks = GenerateDiskTemplate(self,
                                 self.op.disk_template,
                                 instance_uuid, self.pnode.uuid,
                                 self.secondaries,
                                 self.disks,
                                 self.instance_file_storage_dir,
                                 self.op.file_driver,
                                 0,
                                 feedback_fn,
                                 self.cfg.GetGroupDiskParams(nodegroup))

    iobj = objects.Instance(name=self.op.instance_name,
                            uuid=instance_uuid,
                            os=self.op.os_type,
                            primary_node=self.pnode.uuid,
                            nics=self.nics, disks=disks,
                            disk_template=self.op.disk_template,
                            disks_active=False,
                            admin_state=constants.ADMINST_DOWN,
                            network_port=network_port,
                            beparams=self.op.beparams,
                            hvparams=self.op.hvparams,
                            hypervisor=self.op.hypervisor,
                            osparams=self.op.osparams,
                            )

    if self.op.tags:
      for tag in self.op.tags:
        iobj.AddTag(tag)

    if self.adopt_disks:
      if self.op.disk_template == constants.DT_PLAIN:
        # rename LVs to the newly-generated names; we need to construct
        # 'fake' LV disks with the old data, plus the new unique_id
        tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
        rename_to = []
        for t_dsk, a_dsk in zip(tmp_disks, self.disks):
          rename_to.append(t_dsk.logical_id)
          t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
        result = self.rpc.call_blockdev_rename(self.pnode.uuid,
                                               zip(tmp_disks, rename_to))
        result.Raise("Failed to rename adopted LVs")
    else:
      feedback_fn("* creating instance disks...")
      try:
        CreateDisks(self, iobj)
      except errors.OpExecError:
        self.LogWarning("Device creation failed")
        self.cfg.ReleaseDRBDMinors(self.op.instance_name)
        raise

    feedback_fn("adding instance %s to cluster config" % self.op.instance_name)

    self.cfg.AddInstance(iobj, self.proc.GetECId())

    # Declare that we don't want to remove the instance lock anymore, as we've
    # added the instance to the config
    del self.remove_locks[locking.LEVEL_INSTANCE]

    if self.op.mode == constants.INSTANCE_IMPORT:
      # Release unused nodes
      ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node_uuid])
    else:
      # Release all nodes
      ReleaseLocks(self, locking.LEVEL_NODE)

    disk_abort = False
    if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
      feedback_fn("* wiping instance disks...")
      try:
        WipeDisks(self, iobj)
      except errors.OpExecError, err:
        logging.exception("Wiping disks failed")
        self.LogWarning("Wiping instance disks failed (%s)", err)
        disk_abort = True

    if disk_abort:
      # Something is already wrong with the disks, don't do anything else
      pass
    elif self.op.wait_for_sync:
      disk_abort = not WaitForSync(self, iobj)
    elif iobj.disk_template in constants.DTS_INT_MIRROR:
      # make sure the disks are not degraded (still sync-ing is ok)
      feedback_fn("* checking mirrors status")
      disk_abort = not WaitForSync(self, iobj, oneshot=True)
    else:
      disk_abort = False

    if disk_abort:
      RemoveDisks(self, iobj)
      self.cfg.RemoveInstance(iobj.uuid)
      # Make sure the instance lock gets removed
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance")

    # instance disks are now active
    iobj.disks_active = True

    # Release all node resource locks
    ReleaseLocks(self, locking.LEVEL_NODE_RES)

    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
      if self.op.mode == constants.INSTANCE_CREATE:
        if not self.op.no_install:
          pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
                        not self.op.wait_for_sync)
          if pause_sync:
            feedback_fn("* pausing disk sync to install instance OS")
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
                                                              (iobj.disks,
                                                               iobj), True)
            for idx, success in enumerate(result.payload):
              if not success:
                logging.warn("pause-sync of instance %s for disk %d failed",
                             self.op.instance_name, idx)

          feedback_fn("* running the instance OS create scripts...")
          # FIXME: pass debug option from opcode to backend
          os_add_result = \
            self.rpc.call_instance_os_add(self.pnode.uuid, (iobj, None), False,
                                          self.op.debug_level)
          if pause_sync:
            feedback_fn("* resuming disk sync")
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
                                                              (iobj.disks,
                                                               iobj), False)
            for idx, success in enumerate(result.payload):
              if not success:
                logging.warn("resume-sync of instance %s for disk %d failed",
                             self.op.instance_name, idx)

          os_add_result.Raise("Could not add os for instance %s"
                              " on node %s" % (self.op.instance_name,
                                               self.pnode.name))

      else:
        if self.op.mode == constants.INSTANCE_IMPORT:
          feedback_fn("* running the instance OS import scripts...")

          transfers = []

          for idx, image in enumerate(self.src_images):
            if not image:
              continue

            # FIXME: pass debug option from opcode to backend
            dt = masterd.instance.DiskTransfer("disk/%s" % idx,
                                               constants.IEIO_FILE, (image, ),
                                               constants.IEIO_SCRIPT,
                                               ((iobj.disks[idx], iobj), idx),
                                               None)
            transfers.append(dt)

          import_result = \
            masterd.instance.TransferInstanceData(self, feedback_fn,
                                                  self.op.src_node_uuid,
                                                  self.pnode.uuid,
                                                  self.pnode.secondary_ip,
                                                  iobj, transfers)
          if not compat.all(import_result):
            self.LogWarning("Some disks for instance %s on node %s were not"
                            " imported successfully" % (self.op.instance_name,
                                                        self.pnode.name))

          rename_from = self._old_instance_name

        elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
          feedback_fn("* preparing remote import...")
          # The source cluster will stop the instance before attempting to make
          # a connection. In some cases stopping an instance can take a long
          # time, hence the shutdown timeout is added to the connection
          # timeout.
          connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
                             self.op.source_shutdown_timeout)
          timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)

          assert iobj.primary_node == self.pnode.uuid
          disk_results = \
            masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
                                          self.source_x509_ca,
                                          self._cds, timeouts)
          if not compat.all(disk_results):
            # TODO: Should the instance still be started, even if some disks
            # failed to import (valid for local imports, too)?
            self.LogWarning("Some disks for instance %s on node %s were not"
                            " imported successfully" % (self.op.instance_name,
                                                        self.pnode.name))

          rename_from = self.source_instance_name

        else:
          # also checked in the prereq part
          raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
                                       % self.op.mode)

        # Run rename script on newly imported instance
        assert iobj.name == self.op.instance_name
        feedback_fn("Running rename script for %s" % self.op.instance_name)
        result = self.rpc.call_instance_run_rename(self.pnode.uuid, iobj,
                                                   rename_from,
                                                   self.op.debug_level)
        result.Warn("Failed to run rename script for %s on node %s" %
                    (self.op.instance_name, self.pnode.name), self.LogWarning)

    assert not self.owned_locks(locking.LEVEL_NODE_RES)

    if self.op.start:
      iobj.admin_state = constants.ADMINST_UP
      self.cfg.Update(iobj, feedback_fn)
      logging.info("Starting instance %s on node %s", self.op.instance_name,
                   self.pnode.name)
      feedback_fn("* starting instance...")
      result = self.rpc.call_instance_start(self.pnode.uuid, (iobj, None, None),
                                            False, self.op.reason)
      result.Raise("Could not start instance")

    return list(iobj.all_nodes)


class LUInstanceRename(LogicalUnit):
  """Rename an instance.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE

  def CheckArguments(self):
    """Check arguments.

    """
    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("IP address check requires a name check",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = BuildInstanceHookEnvByObject(self, self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    (self.op.instance_uuid, self.op.instance_name) = \
      ExpandInstanceUuidAndName(self.cfg, self.op.instance_uuid,
                                self.op.instance_name)
    instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    assert instance is not None

    # It should actually not happen that an instance is running with a disabled
    # disk template, but in case it does, the renaming of file-based instances
    # will fail horribly. Thus, we test it before.
    if (instance.disk_template in constants.DTS_FILEBASED and
        self.op.new_name != instance.name):
      CheckDiskTemplateEnabled(self.cfg.GetClusterInfo(),
                               instance.disk_template)

    CheckNodeOnline(self, instance.primary_node)
    CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
                       msg="cannot rename")
    self.instance = instance

    new_name = self.op.new_name
    if self.op.name_check:
      hostname = _CheckHostnameSane(self, new_name)
      new_name = self.op.new_name = hostname.name
      if (self.op.ip_check and
          netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (hostname.ip, new_name),
                                   errors.ECODE_NOTUNIQUE)

    instance_names = [inst.name for
                      inst in self.cfg.GetAllInstancesInfo().values()]
    if new_name in instance_names and new_name != instance.name:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 new_name, errors.ECODE_EXISTS)

  def Exec(self, feedback_fn):
    """Rename the instance.

    """
    old_name = self.instance.name

    rename_file_storage = False
    if (self.instance.disk_template in constants.DTS_FILEBASED and
        self.op.new_name != self.instance.name):
      old_file_storage_dir = os.path.dirname(
                               self.instance.disks[0].logical_id[1])
      rename_file_storage = True

    self.cfg.RenameInstance(self.instance.uuid, self.op.new_name)
    # Change the instance lock. This is definitely safe while we hold the BGL.
    # Otherwise the new lock would have to be added in acquired mode.
    assert self.REQ_BGL
    assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER)
    self.glm.remove(locking.LEVEL_INSTANCE, old_name)
    self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)

    # re-read the instance from the configuration after rename
    renamed_inst = self.cfg.GetInstanceInfo(self.instance.uuid)

    if rename_file_storage:
      new_file_storage_dir = os.path.dirname(
                               renamed_inst.disks[0].logical_id[1])
      result = self.rpc.call_file_storage_dir_rename(renamed_inst.primary_node,
                                                     old_file_storage_dir,
                                                     new_file_storage_dir)
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
                   " (but the instance has been renamed in Ganeti)" %
                   (self.cfg.GetNodeName(renamed_inst.primary_node),
                    old_file_storage_dir, new_file_storage_dir))

    StartInstanceDisks(self, renamed_inst, None)
    # update info on disks
    info = GetInstanceInfoText(renamed_inst)
    for (idx, disk) in enumerate(renamed_inst.disks):
      for node_uuid in renamed_inst.all_nodes:
        result = self.rpc.call_blockdev_setinfo(node_uuid,
                                                (disk, renamed_inst), info)
        result.Warn("Error setting info on node %s for disk %s" %
                    (self.cfg.GetNodeName(node_uuid), idx), self.LogWarning)
    try:
      result = self.rpc.call_instance_run_rename(renamed_inst.primary_node,
                                                 renamed_inst, old_name,
                                                 self.op.debug_level)
      result.Warn("Could not run OS rename script for instance %s on node %s"
                  " (but the instance has been renamed in Ganeti)" %
                  (renamed_inst.name,
                   self.cfg.GetNodeName(renamed_inst.primary_node)),
                  self.LogWarning)
    finally:
      ShutdownInstanceDisks(self, renamed_inst)

    return renamed_inst.name


class LUInstanceRemove(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()
    elif level == locking.LEVEL_NODE_RES:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = BuildInstanceHookEnvByObject(self, self.instance)
    env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()]
    nl_post = list(self.instance.all_nodes) + nl
    return (nl, nl_post)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Remove the instance.

    """
    logging.info("Shutting down instance %s on node %s", self.instance.name,
                 self.cfg.GetNodeName(self.instance.primary_node))

    result = self.rpc.call_instance_shutdown(self.instance.primary_node,
                                             self.instance,
                                             self.op.shutdown_timeout,
                                             self.op.reason)
    if self.op.ignore_failures:
      result.Warn("Warning: can't shutdown instance", feedback_fn)
    else:
      result.Raise("Could not shutdown instance %s on node %s" %
                   (self.instance.name,
                    self.cfg.GetNodeName(self.instance.primary_node)))

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES))
    assert not (set(self.instance.all_nodes) -
                self.owned_locks(locking.LEVEL_NODE)), \
      "Not owning correct locks"

    RemoveInstance(self, feedback_fn, self.instance, self.op.ignore_failures)


class LUInstanceMove(LogicalUnit):
  """Move an instance by data-copying.

  """
  HPATH = "instance-move"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    (self.op.target_node_uuid, self.op.target_node) = \
      ExpandNodeUuidAndName(self.cfg, self.op.target_node_uuid,
                            self.op.target_node)
    self.needed_locks[locking.LEVEL_NODE] = [self.op.target_node_uuid]
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes(primary_only=True)
    elif level == locking.LEVEL_NODE_RES:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "TARGET_NODE": self.op.target_node,
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
      }
    env.update(BuildInstanceHookEnvByObject(self, self.instance))
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [
      self.cfg.GetMasterNode(),
      self.instance.primary_node,
      self.op.target_node_uuid,
      ]
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    if self.instance.disk_template not in constants.DTS_COPYABLE:
      raise errors.OpPrereqError("Disk template %s not suitable for copying" %
                                 self.instance.disk_template,
                                 errors.ECODE_STATE)

    target_node = self.cfg.GetNodeInfo(self.op.target_node_uuid)
    assert target_node is not None, \
      "Cannot retrieve locked node %s" % self.op.target_node

    self.target_node_uuid = target_node.uuid
    if target_node.uuid == self.instance.primary_node:
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
                                 (self.instance.name, target_node.name),
                                 errors.ECODE_STATE)

    bep = self.cfg.GetClusterInfo().FillBE(self.instance)

    for idx, dsk in enumerate(self.instance.disks):
      if dsk.dev_type not in (constants.DT_PLAIN, constants.DT_FILE,
                              constants.DT_SHARED_FILE):
        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
                                   " cannot copy" % idx, errors.ECODE_STATE)

    CheckNodeOnline(self, target_node.uuid)
    CheckNodeNotDrained(self, target_node.uuid)
    CheckNodeVmCapable(self, target_node.uuid)
    cluster = self.cfg.GetClusterInfo()
    group_info = self.cfg.GetNodeGroup(target_node.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
    CheckTargetNodeIPolicy(self, ipolicy, self.instance, target_node, self.cfg,
                           ignore=self.op.ignore_ipolicy)

    if self.instance.admin_state == constants.ADMINST_UP:
      # check memory requirements on the secondary node
      CheckNodeFreeMemory(
          self, target_node.uuid, "failing over instance %s" %
          self.instance.name, bep[constants.BE_MAXMEM],
          self.instance.hypervisor,
          self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])
    else:
      self.LogInfo("Not checking memory on the secondary node as"
                   " instance will not be started")

    # check bridge existence
    CheckInstanceBridgesExist(self, self.instance, node_uuid=target_node.uuid)

  def Exec(self, feedback_fn):
    """Move an instance.

    The move is done by shutting it down on its present node, copying
    the data over (slow) and starting it on the new node.

    """
    source_node = self.cfg.GetNodeInfo(self.instance.primary_node)
    target_node = self.cfg.GetNodeInfo(self.target_node_uuid)

    self.LogInfo("Shutting down instance %s on source node %s",
                 self.instance.name, source_node.name)

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES))

    result = self.rpc.call_instance_shutdown(source_node.uuid, self.instance,
                                             self.op.shutdown_timeout,
                                             self.op.reason)
    if self.op.ignore_consistency:
      result.Warn("Could not shutdown instance %s on node %s. Proceeding"
                  " anyway. Please make sure node %s is down. Error details" %
                  (self.instance.name, source_node.name, source_node.name),
                  self.LogWarning)
    else:
      result.Raise("Could not shutdown instance %s on node %s" %
                   (self.instance.name, source_node.name))

    # create the target disks
    try:
      CreateDisks(self, self.instance, target_node_uuid=target_node.uuid)
    except errors.OpExecError:
      self.LogWarning("Device creation failed")
      self.cfg.ReleaseDRBDMinors(self.instance.uuid)
      raise

    cluster_name = self.cfg.GetClusterInfo().cluster_name

    errs = []
    # activate, get path, copy the data over
    for idx, disk in enumerate(self.instance.disks):
      self.LogInfo("Copying data for disk %d", idx)
      result = self.rpc.call_blockdev_assemble(
                 target_node.uuid, (disk, self.instance), self.instance.name,
                 True, idx)
      if result.fail_msg:
        self.LogWarning("Can't assemble newly created disk %d: %s",
                        idx, result.fail_msg)
        errs.append(result.fail_msg)
        break
      dev_path = result.payload
      result = self.rpc.call_blockdev_export(source_node.uuid, (disk,
                                                                self.instance),
                                             target_node.secondary_ip,
                                             dev_path, cluster_name)
      if result.fail_msg:
        self.LogWarning("Can't copy data over for disk %d: %s",
                        idx, result.fail_msg)
        errs.append(result.fail_msg)
        break

    if errs:
      self.LogWarning("Some disks failed to copy, aborting")
      try:
        RemoveDisks(self, self.instance, target_node_uuid=target_node.uuid)
      finally:
        self.cfg.ReleaseDRBDMinors(self.instance.uuid)
        raise errors.OpExecError("Errors during disk copy: %s" %
                                 (",".join(errs),))

    self.instance.primary_node = target_node.uuid
    self.cfg.Update(self.instance, feedback_fn)

    self.LogInfo("Removing the disks on the original node")
    RemoveDisks(self, self.instance, target_node_uuid=source_node.uuid)

    # Only start the instance if it's marked as up
    if self.instance.admin_state == constants.ADMINST_UP:
      self.LogInfo("Starting instance %s on node %s",
                   self.instance.name, target_node.name)

      disks_ok, _ = AssembleInstanceDisks(self, self.instance,
                                          ignore_secondaries=True)
      if not disks_ok:
        ShutdownInstanceDisks(self, self.instance)
        raise errors.OpExecError("Can't activate the instance's disks")

      result = self.rpc.call_instance_start(target_node.uuid,
                                            (self.instance, None, None), False,
                                            self.op.reason)
      msg = result.fail_msg
      if msg:
        ShutdownInstanceDisks(self, self.instance)
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
                                 (self.instance.name, target_node.name, msg))

    
1867

    
1868
class LUInstanceMultiAlloc(NoHooksLU):
1869
  """Allocates multiple instances at the same time.
1870

1871
  """
1872
  REQ_BGL = False
1873

    
1874
  def CheckArguments(self):
1875
    """Check arguments.
1876

1877
    """
1878
    nodes = []
1879
    for inst in self.op.instances:
1880
      if inst.iallocator is not None:
1881
        raise errors.OpPrereqError("iallocator are not allowed to be set on"
1882
                                   " instance objects", errors.ECODE_INVAL)
1883
      nodes.append(bool(inst.pnode))
1884
      if inst.disk_template in constants.DTS_INT_MIRROR:
1885
        nodes.append(bool(inst.snode))
1886

    
1887
    has_nodes = compat.any(nodes)
1888
    if compat.all(nodes) ^ has_nodes:
1889
      raise errors.OpPrereqError("There are instance objects providing"
1890
                                 " pnode/snode while others do not",
1891
                                 errors.ECODE_INVAL)
1892

    
1893
    if not has_nodes and self.op.iallocator is None:
1894
      default_iallocator = self.cfg.GetDefaultIAllocator()
1895
      if default_iallocator:
1896
        self.op.iallocator = default_iallocator
1897
      else:
1898
        raise errors.OpPrereqError("No iallocator or nodes on the instances"
1899
                                   " given and no cluster-wide default"
1900
                                   " iallocator found; please specify either"
1901
                                   " an iallocator or nodes on the instances"
1902
                                   " or set a cluster-wide default iallocator",
1903
                                   errors.ECODE_INVAL)
1904

    
1905
    _CheckOpportunisticLocking(self.op)
1906

    
1907
    dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
1908
    if dups:
1909
      raise errors.OpPrereqError("There are duplicate instance names: %s" %
1910
                                 utils.CommaJoin(dups), errors.ECODE_INVAL)
1911

    
1912
  def ExpandNames(self):
1913
    """Calculate the locks.
1914

1915
    """
1916
    self.share_locks = ShareAll()
1917
    self.needed_locks = {
1918
      # iallocator will select nodes and even if no iallocator is used,
1919
      # collisions with LUInstanceCreate should be avoided
1920
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
1921
      }
1922

    
1923
    if self.op.iallocator:
1924
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
1925
      self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET
1926

    
1927
      if self.op.opportunistic_locking:
1928
        self.opportunistic_locks[locking.LEVEL_NODE] = True
1929
        self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
1930
    else:
1931
      nodeslist = []
1932
      for inst in self.op.instances:
1933
        (inst.pnode_uuid, inst.pnode) = \
1934
          ExpandNodeUuidAndName(self.cfg, inst.pnode_uuid, inst.pnode)
1935
        nodeslist.append(inst.pnode_uuid)
1936
        if inst.snode is not None:
1937
          (inst.snode_uuid, inst.snode) = \
1938
            ExpandNodeUuidAndName(self.cfg, inst.snode_uuid, inst.snode)
1939
          nodeslist.append(inst.snode_uuid)
1940

    
1941
      self.needed_locks[locking.LEVEL_NODE] = nodeslist
1942
      # Lock resources of instance's primary and secondary nodes (copy to
1943
      # prevent accidential modification)
1944
      self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)
1945

    
1946
  def CheckPrereq(self):
1947
    """Check prerequisite.
1948

1949
    """
1950
    if self.op.iallocator:
1951
      cluster = self.cfg.GetClusterInfo()
1952
      default_vg = self.cfg.GetVGName()
1953
      ec_id = self.proc.GetECId()
1954

    
1955
      if self.op.opportunistic_locking:
1956
        # Only consider nodes for which a lock is held
1957
        node_whitelist = self.cfg.GetNodeNames(
1958
                           list(self.owned_locks(locking.LEVEL_NODE)))
1959
      else:
1960
        node_whitelist = None
1961

    
1962
      insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
1963
                                           _ComputeNics(op, cluster, None,
1964
                                                        self.cfg, ec_id),
1965
                                           _ComputeFullBeParams(op, cluster),
1966
                                           node_whitelist)
1967
               for op in self.op.instances]
1968

    
1969
      req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
1970
      ial = iallocator.IAllocator(self.cfg, self.rpc, req)
1971

    
1972
      ial.Run(self.op.iallocator)
1973

    
1974
      if not ial.success:
1975
        raise errors.OpPrereqError("Can't compute nodes using"
1976
                                   " iallocator '%s': %s" %
1977
                                   (self.op.iallocator, ial.info),
1978
                                   errors.ECODE_NORES)
1979

    
1980
      self.ia_result = ial.result
1981

    
1982
    if self.op.dry_run:
1983
      self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
1984
        constants.JOB_IDS_KEY: [],
1985
        })
1986

    
1987
  def _ConstructPartialResult(self):
1988
    """Contructs the partial result.
1989

1990
    """
1991
    if self.op.iallocator:
1992
      (allocatable, failed_insts) = self.ia_result
1993
      allocatable_insts = map(compat.fst, allocatable)
1994
    else:
1995
      allocatable_insts = [op.instance_name for op in self.op.instances]
1996
      failed_insts = []
1997

    
1998
    return {
1999
      constants.ALLOCATABLE_KEY: allocatable_insts,
2000
      constants.FAILED_KEY: failed_insts,
2001
      }
2002

    
2003
  def Exec(self, feedback_fn):
2004
    """Executes the opcode.
2005

2006
    """
2007
    jobs = []
2008
    if self.op.iallocator:
2009
      op2inst = dict((op.instance_name, op) for op in self.op.instances)
2010
      (allocatable, failed) = self.ia_result
2011

    
2012
      for (name, node_names) in allocatable:
2013
        op = op2inst.pop(name)
2014

    
2015
        (op.pnode_uuid, op.pnode) = \
2016
          ExpandNodeUuidAndName(self.cfg, None, node_names[0])
2017
        if len(node_names) > 1:
2018
          (op.snode_uuid, op.snode) = \
2019
            ExpandNodeUuidAndName(self.cfg, None, node_names[1])
2020

    
2021
          jobs.append([op])
2022

    
2023
        missing = set(op2inst.keys()) - set(failed)
2024
        assert not missing, \
2025
          "Iallocator did return incomplete result: %s" % \
2026
          utils.CommaJoin(missing)
2027
    else:
2028
      jobs.extend([op] for op in self.op.instances)
2029

    
2030
    return ResultWithJobs(jobs, **self._ConstructPartialResult())


class _InstNicModPrivate:
  """Data structure for network interface modifications.

  Used by L{LUInstanceSetParams}.

  """
  def __init__(self):
    self.params = None
    self.filled = None


def _PrepareContainerMods(mods, private_fn):
  """Prepares a list of container modifications by adding a private data field.

  @type mods: list of tuples; (operation, index, parameters)
  @param mods: List of modifications
  @type private_fn: callable or None
  @param private_fn: Callable for constructing a private data field for a
    modification
  @rtype: list

  """
  if private_fn is None:
    fn = lambda: None
  else:
    fn = private_fn

  return [(op, idx, params, fn()) for (op, idx, params) in mods]
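
# Illustrative usage sketch, not part of the original module: for NIC
# modifications, L{LUInstanceSetParams} passes L{_InstNicModPrivate} as
# C{private_fn}, so e.g.
#   _PrepareContainerMods([(constants.DDM_ADD, -1, {})], _InstNicModPrivate)
# returns [(constants.DDM_ADD, -1, {}, <_InstNicModPrivate instance>)], while
# a C{private_fn} of None yields a None private field for every modification.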


def _CheckNodesPhysicalCPUs(lu, node_uuids, requested, hypervisor_specs):
  """Checks if nodes have enough physical CPUs.

  This function checks if all given nodes have the needed number of
  physical CPUs. In case any node has fewer CPUs or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type node_uuids: C{list}
  @param node_uuids: the list of node UUIDs to check
  @type requested: C{int}
  @param requested: the minimum acceptable number of physical CPUs
  @type hypervisor_specs: list of pairs (string, dict of strings)
  @param hypervisor_specs: list of hypervisor specifications in
      pairs (hypervisor_name, hvparams)
  @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
      or we cannot check the node

  """
  nodeinfo = lu.rpc.call_node_info(node_uuids, None, hypervisor_specs)
  for node_uuid in node_uuids:
    info = nodeinfo[node_uuid]
    node_name = lu.cfg.GetNodeName(node_uuid)
    info.Raise("Cannot get current information from node %s" % node_name,
               prereq=True, ecode=errors.ECODE_ENVIRON)
    (_, _, (hv_info, )) = info.payload
    num_cpus = hv_info.get("cpu_total", None)
    if not isinstance(num_cpus, int):
      raise errors.OpPrereqError("Can't compute the number of physical CPUs"
                                 " on node %s, result was '%s'" %
                                 (node_name, num_cpus), errors.ECODE_ENVIRON)
    if requested > num_cpus:
      raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
                                 "required" % (node_name, num_cpus, requested),
                                 errors.ECODE_NORES)
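
# Note (illustrative assumption, not part of the original module):
# hypervisor_specs is the same structure accepted by rpc.call_node_info above,
# typically a single-entry list such as
#   [(constants.HT_KVM, cluster.hvparams[constants.HT_KVM])]
# so that "cpu_total" is read from the hypervisor actually in use.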


def GetItemFromContainer(identifier, kind, container):
  """Return the item referred to by the identifier.

  @type identifier: string
  @param identifier: Item index or name or UUID
  @type kind: string
  @param kind: One-word item description
  @type container: list
  @param container: Container to get the item from

  """
  # Index
  try:
    idx = int(identifier)
    if idx == -1:
      # Append
      absidx = len(container) - 1
    elif idx < 0:
      raise IndexError("Not accepting negative indices other than -1")
    elif idx > len(container):
      raise IndexError("Got %s index %s, but there are only %s" %
                       (kind, idx, len(container)))
    else:
      absidx = idx
    return (absidx, container[idx])
  except ValueError:
    pass

  for idx, item in enumerate(container):
    if item.uuid == identifier or item.name == identifier:
      return (idx, item)

  raise errors.OpPrereqError("Cannot find %s with identifier %s" %
                             (kind, identifier), errors.ECODE_NOENT)
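
# Illustrative examples, not part of the original module (the disk name used
# below is hypothetical):
#   GetItemFromContainer("0", "disk", instance.disks)  -> (0, first disk)
#   GetItemFromContainer("-1", "disk", instance.disks) -> (last index, last disk)
#   GetItemFromContainer("data-disk", "disk", instance.disks)
#     -> (index, item) of the disk whose name or UUID equals "data-disk",
#        or OpPrereqError (ECODE_NOENT) if no such item exists.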


def _ApplyContainerMods(kind, container, chgdesc, mods,
                        create_fn, modify_fn, remove_fn,
                        post_add_fn=None):
  """Applies descriptions in C{mods} to C{container}.

  @type kind: string
  @param kind: One-word item description
  @type container: list
  @param container: Container to modify
  @type chgdesc: None or list
  @param chgdesc: List of applied changes
  @type mods: list
  @param mods: Modifications as returned by L{_PrepareContainerMods}
  @type create_fn: callable
  @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
    receives absolute item index, parameters and private data object as added
    by L{_PrepareContainerMods}, returns tuple containing new item and changes
    as list
  @type modify_fn: callable
  @param modify_fn: Callback for modifying an existing item
    (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
    and private data object as added by L{_PrepareContainerMods}, returns
    changes as list
  @type remove_fn: callable
  @param remove_fn: Callback on removing item; receives absolute item index,
    item and private data object as added by L{_PrepareContainerMods}
  @type post_add_fn: callable
  @param post_add_fn: Callable for post-processing a newly created item after
    it has been put into the container. It receives the index of the new item
    and the new item as parameters.

  """
  for (op, identifier, params, private) in mods:
    changes = None

    if op == constants.DDM_ADD:
      # Calculate where item will be added
      # When adding an item, identifier can only be an index
      try:
        idx = int(identifier)
      except ValueError:
        raise errors.OpPrereqError("Only positive integer or -1 is accepted as"
                                   " identifier for %s" % constants.DDM_ADD,
                                   errors.ECODE_INVAL)
      if idx == -1:
        addidx = len(container)
      else:
        if idx < 0:
          raise IndexError("Not accepting negative indices other than -1")
        elif idx > len(container):
          raise IndexError("Got %s index %s, but there are only %s" %
                           (kind, idx, len(container)))
        addidx = idx

      if create_fn is None:
        item = params
      else:
        (item, changes) = create_fn(addidx, params, private)

      if idx == -1:
        container.append(item)
      else:
        assert idx >= 0
        assert idx <= len(container)
        # list.insert does so before the specified index
        container.insert(idx, item)

      if post_add_fn is not None:
        post_add_fn(addidx, item)

    else:
      # Retrieve existing item
      (absidx, item) = GetItemFromContainer(identifier, kind, container)

      if op == constants.DDM_REMOVE:
        assert not params

        if remove_fn is not None:
          remove_fn(absidx, item, private)

        changes = [("%s/%s" % (kind, absidx), "remove")]

        assert container[absidx] == item
        del container[absidx]
      elif op == constants.DDM_MODIFY:
        if modify_fn is not None:
          changes = modify_fn(absidx, item, params, private)
      else:
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)

    assert _TApplyContModsCbChanges(changes)

    if not (chgdesc is None or changes is None):
      chgdesc.extend(changes)
      chgdesc.extend(changes)
2232

    
2233

    
2234
def _UpdateIvNames(base_index, disks):
2235
  """Updates the C{iv_name} attribute of disks.
2236

2237
  @type disks: list of L{objects.Disk}
2238

2239
  """
2240
  for (idx, disk) in enumerate(disks):
2241
    disk.iv_name = "disk/%s" % (base_index + idx, )
2242

    
2243

    
2244
class LUInstanceSetParams(LogicalUnit):
2245
  """Modifies an instances's parameters.
2246

2247
  """
2248
  HPATH = "instance-modify"
2249
  HTYPE = constants.HTYPE_INSTANCE
2250
  REQ_BGL = False
2251

    
2252
  @staticmethod
2253
  def _UpgradeDiskNicMods(kind, mods, verify_fn):
2254
    assert ht.TList(mods)
2255
    assert not mods or len(mods[0]) in (2, 3)
2256

    
2257
    if mods and len(mods[0]) == 2:
2258
      result = []
2259

    
2260
      addremove = 0
2261
      for op, params in mods:
2262
        if op in (constants.DDM_ADD, constants.DDM_REMOVE):
2263
          result.append((op, -1, params))
2264
          addremove += 1
2265

    
2266
          if addremove > 1:
2267
            raise errors.OpPrereqError("Only one %s add or remove operation is"
2268
                                       " supported at a time" % kind,
2269
                                       errors.ECODE_INVAL)
2270
        else:
2271
          result.append((constants.DDM_MODIFY, op, params))
2272

    
2273
      assert verify_fn(result)
2274
    else:
2275
      result = mods
2276

    
2277
    return result
2278

    
2279
  @staticmethod
2280
  def _CheckMods(kind, mods, key_types, item_fn):
2281
    """Ensures requested disk/NIC modifications are valid.
2282

2283
    """
2284
    for (op, _, params) in mods:
2285
      assert ht.TDict(params)
2286

    
2287
      # If 'key_types' is an empty dict, we assume we have an
2288
      # 'ext' template and thus do not ForceDictType
2289
      if key_types:
2290
        utils.ForceDictType(params, key_types)
2291

    
2292
      if op == constants.DDM_REMOVE:
2293
        if params:
2294
          raise errors.OpPrereqError("No settings should be passed when"
2295
                                     " removing a %s" % kind,
2296
                                     errors.ECODE_INVAL)
2297
      elif op in (constants.DDM_ADD, constants.DDM_MODIFY):
2298
        item_fn(op, params)
2299
      else:
2300
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2301

    
2302
  @staticmethod
2303
  def _VerifyDiskModification(op, params, excl_stor):
2304
    """Verifies a disk modification.
2305

2306
    """
2307
    if op == constants.DDM_ADD:
2308
      mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
2309
      if mode not in constants.DISK_ACCESS_SET:
2310
        raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
2311
                                   errors.ECODE_INVAL)
2312

    
2313
      size = params.get(constants.IDISK_SIZE, None)
2314
      if size is None:
2315
        raise errors.OpPrereqError("Required disk parameter '%s' missing" %
2316
                                   constants.IDISK_SIZE, errors.ECODE_INVAL)
2317
      size = int(size)
2318

    
2319
      params[constants.IDISK_SIZE] = size
2320
      name = params.get(constants.IDISK_NAME, None)
2321
      if name is not None and name.lower() == constants.VALUE_NONE:
2322
        params[constants.IDISK_NAME] = None
2323

    
2324
      CheckSpindlesExclusiveStorage(params, excl_stor, True)
2325

    
2326
    elif op == constants.DDM_MODIFY:
2327
      if constants.IDISK_SIZE in params:
2328
        raise errors.OpPrereqError("Disk size change not possible, use"
2329
                                   " grow-disk", errors.ECODE_INVAL)
2330
      if len(params) > 2:
2331
        raise errors.OpPrereqError("Disk modification doesn't support"
2332
                                   " additional arbitrary parameters",
2333
                                   errors.ECODE_INVAL)
2334
      name = params.get(constants.IDISK_NAME, None)
2335
      if name is not None and name.lower() == constants.VALUE_NONE:
2336
        params[constants.IDISK_NAME] = None
2337

    
2338
  @staticmethod
2339
  def _VerifyNicModification(op, params):
2340
    """Verifies a network interface modification.
2341

2342
    """
2343
    if op in (constants.DDM_ADD, constants.DDM_MODIFY):
2344
      ip = params.get(constants.INIC_IP, None)
2345
      name = params.get(constants.INIC_NAME, None)
2346
      req_net = params.get(constants.INIC_NETWORK, None)
2347
      link = params.get(constants.NIC_LINK, None)
2348
      mode = params.get(constants.NIC_MODE, None)
2349
      if name is not None and name.lower() == constants.VALUE_NONE:
2350
        params[constants.INIC_NAME] = None
2351
      if req_net is not None:
2352
        if req_net.lower() == constants.VALUE_NONE:
2353
          params[constants.INIC_NETWORK] = None
2354
          req_net = None
2355
        elif link is not None or mode is not None:
2356
          raise errors.OpPrereqError("If network is given"
2357
                                     " mode or link should not",
2358
                                     errors.ECODE_INVAL)
2359

    
2360
      if op == constants.DDM_ADD:
2361
        macaddr = params.get(constants.INIC_MAC, None)
2362
        if macaddr is None:
2363
          params[constants.INIC_MAC] = constants.VALUE_AUTO
2364

    
2365
      if ip is not None:
2366
        if ip.lower() == constants.VALUE_NONE:
2367
          params[constants.INIC_IP] = None
2368
        else:
2369
          if ip.lower() == constants.NIC_IP_POOL:
2370
            if op == constants.DDM_ADD and req_net is None:
2371
              raise errors.OpPrereqError("If ip=pool, parameter network"
2372
                                         " cannot be none",
2373
                                         errors.ECODE_INVAL)
2374
          else:
2375
            if not netutils.IPAddress.IsValid(ip):
2376
              raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
2377
                                         errors.ECODE_INVAL)
2378

    
2379
      if constants.INIC_MAC in params:
2380
        macaddr = params[constants.INIC_MAC]
2381
        if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2382
          macaddr = utils.NormalizeAndValidateMac(macaddr)
2383

    
2384
        if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
2385
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
2386
                                     " modifying an existing NIC",
2387
                                     errors.ECODE_INVAL)
2388

    
2389
  def CheckArguments(self):
2390
    if not (self.op.nics or self.op.disks or self.op.disk_template or
2391
            self.op.hvparams or self.op.beparams or self.op.os_name or
2392
            self.op.osparams or self.op.offline is not None or
2393
            self.op.runtime_mem or self.op.pnode):
2394
      raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
2395

    
2396
    if self.op.hvparams:
2397
      CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
2398
                           "hypervisor", "instance", "cluster")
2399

    
2400
    self.op.disks = self._UpgradeDiskNicMods(
2401
      "disk", self.op.disks, ht.TSetParamsMods(ht.TIDiskParams))
2402
    self.op.nics = self._UpgradeDiskNicMods(
2403
      "NIC", self.op.nics, ht.TSetParamsMods(ht.TINicParams))
2404

    
2405
    if self.op.disks and self.op.disk_template is not None:
2406
      raise errors.OpPrereqError("Disk template conversion and other disk"
2407
                                 " changes not supported at the same time",
2408
                                 errors.ECODE_INVAL)
2409

    
2410
    if (self.op.disk_template and
2411
        self.op.disk_template in constants.DTS_INT_MIRROR and
2412
        self.op.remote_node is None):
2413
      raise errors.OpPrereqError("Changing the disk template to a mirrored"
2414
                                 " one requires specifying a secondary node",
2415
                                 errors.ECODE_INVAL)
2416

    
2417
    # Check NIC modifications
2418
    self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
2419
                    self._VerifyNicModification)
2420

    
2421
    if self.op.pnode:
2422
      (self.op.pnode_uuid, self.op.pnode) = \
2423
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
2424

    
2425
  def ExpandNames(self):
2426
    self._ExpandAndLockInstance()
2427
    self.needed_locks[locking.LEVEL_NODEGROUP] = []
2428
    # Can't even acquire node locks in shared mode as upcoming changes in
2429
    # Ganeti 2.6 will start to modify the node object on disk conversion
2430
    self.needed_locks[locking.LEVEL_NODE] = []
2431
    self.needed_locks[locking.LEVEL_NODE_RES] = []
2432
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2433
    # Look node group to look up the ipolicy
2434
    self.share_locks[locking.LEVEL_NODEGROUP] = 1
2435

    
2436
  def DeclareLocks(self, level):
2437
    if level == locking.LEVEL_NODEGROUP:
2438
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
2439
      # Acquire locks for the instance's nodegroups optimistically. Needs
2440
      # to be verified in CheckPrereq
2441
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
2442
        self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
2443
    elif level == locking.LEVEL_NODE:
2444
      self._LockInstancesNodes()
2445
      if self.op.disk_template and self.op.remote_node:
2446
        (self.op.remote_node_uuid, self.op.remote_node) = \
2447
          ExpandNodeUuidAndName(self.cfg, self.op.remote_node_uuid,
2448
                                self.op.remote_node)
2449
        self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node_uuid)
2450
    elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
2451
      # Copy node locks
2452
      self.needed_locks[locking.LEVEL_NODE_RES] = \
2453
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
2454

    
2455
  def BuildHooksEnv(self):
2456
    """Build hooks env.
2457

2458
    This runs on the master, primary and secondaries.
2459

2460
    """
2461
    args = {}
2462
    if constants.BE_MINMEM in self.be_new:
2463
      args["minmem"] = self.be_new[constants.BE_MINMEM]
2464
    if constants.BE_MAXMEM in self.be_new:
2465
      args["maxmem"] = self.be_new[constants.BE_MAXMEM]
2466
    if constants.BE_VCPUS in self.be_new:
2467
      args["vcpus"] = self.be_new[constants.BE_VCPUS]
2468
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
2469
    # information at all.
2470

    
2471
    if self._new_nics is not None:
2472
      nics = []
2473

    
2474
      for nic in self._new_nics:
2475
        n = copy.deepcopy(nic)
2476
        nicparams = self.cluster.SimpleFillNIC(n.nicparams)
2477
        n.nicparams = nicparams
2478
        nics.append(NICToTuple(self, n))
2479

    
2480
      args["nics"] = nics
2481

    
2482
    env = BuildInstanceHookEnvByObject(self, self.instance, override=args)
2483
    if self.op.disk_template:
2484
      env["NEW_DISK_TEMPLATE"] = self.op.disk_template
2485
    if self.op.runtime_mem:
2486
      env["RUNTIME_MEMORY"] = self.op.runtime_mem
2487

    
2488
    return env
2489

    
2490
  def BuildHooksNodes(self):
2491
    """Build hooks nodes.
2492

2493
    """
2494
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
2495
    return (nl, nl)
2496

    
2497
  def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
2498
                              old_params, cluster, pnode_uuid):
2499

    
2500
    update_params_dict = dict([(key, params[key])
2501
                               for key in constants.NICS_PARAMETERS
2502
                               if key in params])
2503

    
2504
    req_link = update_params_dict.get(constants.NIC_LINK, None)
2505
    req_mode = update_params_dict.get(constants.NIC_MODE, None)
2506

    
2507
    new_net_uuid = None
2508
    new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid)
2509
    if new_net_uuid_or_name:
2510
      new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name)
2511
      new_net_obj = self.cfg.GetNetwork(new_net_uuid)
2512

    
2513
    if old_net_uuid:
2514
      old_net_obj = self.cfg.GetNetwork(old_net_uuid)
2515

    
2516
    if new_net_uuid:
2517
      netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode_uuid)
2518
      if not netparams:
2519
        raise errors.OpPrereqError("No netparams found for the network"
2520
                                   " %s, probably not connected" %
2521
                                   new_net_obj.name, errors.ECODE_INVAL)
2522
      new_params = dict(netparams)
2523
    else:
2524
      new_params = GetUpdatedParams(old_params, update_params_dict)
2525

    
2526
    utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)
2527

    
2528
    new_filled_params = cluster.SimpleFillNIC(new_params)
2529
    objects.NIC.CheckParameterSyntax(new_filled_params)
2530

    
2531
    new_mode = new_filled_params[constants.NIC_MODE]
2532
    if new_mode == constants.NIC_MODE_BRIDGED:
2533
      bridge = new_filled_params[constants.NIC_LINK]
2534
      msg = self.rpc.call_bridges_exist(pnode_uuid, [bridge]).fail_msg
2535
      if msg:
2536
        msg = "Error checking bridges on node '%s': %s" % \
2537
                (self.cfg.GetNodeName(pnode_uuid), msg)
2538
        if self.op.force:
2539
          self.warn.append(msg)
2540
        else:
2541
          raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
2542

    
2543
    elif new_mode == constants.NIC_MODE_ROUTED:
2544
      ip = params.get(constants.INIC_IP, old_ip)
2545
      if ip is None:
2546
        raise errors.OpPrereqError("Cannot set the NIC IP address to None"
2547
                                   " on a routed NIC", errors.ECODE_INVAL)
2548

    
2549
    elif new_mode == constants.NIC_MODE_OVS:
2550
      # TODO: check OVS link
2551
      self.LogInfo("OVS links are currently not checked for correctness")
2552

    
2553
    if constants.INIC_MAC in params:
2554
      mac = params[constants.INIC_MAC]
2555
      if mac is None:
2556
        raise errors.OpPrereqError("Cannot unset the NIC MAC address",
2557
                                   errors.ECODE_INVAL)
2558
      elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2559
        # otherwise generate the MAC address
2560
        params[constants.INIC_MAC] = \
2561
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
2562
      else:
2563
        # or validate/reserve the current one
2564
        try:
2565
          self.cfg.ReserveMAC(mac, self.proc.GetECId())
2566
        except errors.ReservationError:
2567
          raise errors.OpPrereqError("MAC address '%s' already in use"
2568
                                     " in cluster" % mac,
2569
                                     errors.ECODE_NOTUNIQUE)
2570
    elif new_net_uuid != old_net_uuid:
2571

    
2572
      def get_net_prefix(net_uuid):
2573
        mac_prefix = None
2574
        if net_uuid:
2575
          nobj = self.cfg.GetNetwork(net_uuid)
2576
          mac_prefix = nobj.mac_prefix
2577

    
2578
        return mac_prefix
2579

    
2580
      new_prefix = get_net_prefix(new_net_uuid)
2581
      old_prefix = get_net_prefix(old_net_uuid)
2582
      if old_prefix != new_prefix:
2583
        params[constants.INIC_MAC] = \
2584
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
2585

    
2586
    # if there is a change in (ip, network) tuple
2587
    new_ip = params.get(constants.INIC_IP, old_ip)
2588
    if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
2589
      if new_ip:
2590
        # if IP is pool then require a network and generate one IP
2591
        if new_ip.lower() == constants.NIC_IP_POOL:
2592
          if new_net_uuid:
2593
            try:
2594
              new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
2595
            except errors.ReservationError:
2596
              raise errors.OpPrereqError("Unable to get a free IP"
2597
                                         " from the address pool",
2598
                                         errors.ECODE_STATE)
2599
            self.LogInfo("Chose IP %s from network %s",
2600
                         new_ip,
2601
                         new_net_obj.name)
2602
            params[constants.INIC_IP] = new_ip
2603
          else:
2604
            raise errors.OpPrereqError("ip=pool, but no network found",
2605
                                       errors.ECODE_INVAL)
2606
        # Reserve new IP if in the new network if any
2607
        elif new_net_uuid:
2608
          try:
2609
            self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId())
2610
            self.LogInfo("Reserving IP %s in network %s",
2611
                         new_ip, new_net_obj.name)
2612
          except errors.ReservationError:
2613
            raise errors.OpPrereqError("IP %s not available in network %s" %
2614
                                       (new_ip, new_net_obj.name),
2615
                                       errors.ECODE_NOTUNIQUE)
2616
        # new network is None so check if new IP is a conflicting IP
2617
        elif self.op.conflicts_check:
2618
          _CheckForConflictingIp(self, new_ip, pnode_uuid)
2619

    
2620
      # release old IP if old network is not None
2621
      if old_ip and old_net_uuid:
2622
        try:
2623
          self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId())
2624
        except errors.AddressPoolError:
2625
          logging.warning("Release IP %s not contained in network %s",
2626
                          old_ip, old_net_obj.name)
2627

    
2628
    # there are no changes in (ip, network) tuple and old network is not None
2629
    elif (old_net_uuid is not None and
2630
          (req_link is not None or req_mode is not None)):
2631
      raise errors.OpPrereqError("Not allowed to change link or mode of"
2632
                                 " a NIC that is connected to a network",
2633
                                 errors.ECODE_INVAL)
2634

    
2635
    private.params = new_params
2636
    private.filled = new_filled_params
2637

    
2638
  def _PreCheckDiskTemplate(self, pnode_info):
2639
    """CheckPrereq checks related to a new disk template."""
2640
    # Arguments are passed to avoid configuration lookups
2641
    pnode_uuid = self.instance.primary_node
2642
    if self.instance.disk_template == self.op.disk_template:
2643
      raise errors.OpPrereqError("Instance already has disk template %s" %
2644
                                 self.instance.disk_template,
2645
                                 errors.ECODE_INVAL)
2646

    
2647
    if not self.cluster.IsDiskTemplateEnabled(self.op.disk_template):
2648
      raise errors.OpPrereqError("Disk template '%s' is not enabled for this"
2649
                                 " cluster." % self.op.disk_template)
2650

    
2651
    if (self.instance.disk_template,
2652
        self.op.disk_template) not in self._DISK_CONVERSIONS:
2653
      raise errors.OpPrereqError("Unsupported disk template conversion from"
2654
                                 " %s to %s" % (self.instance.disk_template,
2655
                                                self.op.disk_template),
2656
                                 errors.ECODE_INVAL)
2657
    CheckInstanceState(self, self.instance, INSTANCE_DOWN,
2658
                       msg="cannot change disk template")
2659
    if self.op.disk_template in constants.DTS_INT_MIRROR:
2660
      if self.op.remote_node_uuid == pnode_uuid:
2661
        raise errors.OpPrereqError("Given new secondary node %s is the same"
2662
                                   " as the primary node of the instance" %
2663
                                   self.op.remote_node, errors.ECODE_STATE)
2664
      CheckNodeOnline(self, self.op.remote_node_uuid)
2665
      CheckNodeNotDrained(self, self.op.remote_node_uuid)
2666
      # FIXME: here we assume that the old instance type is DT_PLAIN
2667
      assert self.instance.disk_template == constants.DT_PLAIN
2668
      disks = [{constants.IDISK_SIZE: d.size,
2669
                constants.IDISK_VG: d.logical_id[0]}
2670
               for d in self.instance.disks]
2671
      required = ComputeDiskSizePerVG(self.op.disk_template, disks)
2672
      CheckNodesFreeDiskPerVG(self, [self.op.remote_node_uuid], required)
2673

    
2674
      snode_info = self.cfg.GetNodeInfo(self.op.remote_node_uuid)
2675
      snode_group = self.cfg.GetNodeGroup(snode_info.group)
2676
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
2677
                                                              snode_group)
2678
      CheckTargetNodeIPolicy(self, ipolicy, self.instance, snode_info, self.cfg,
2679
                             ignore=self.op.ignore_ipolicy)
2680
      if pnode_info.group != snode_info.group:
2681
        self.LogWarning("The primary and secondary nodes are in two"
2682
                        " different node groups; the disk parameters"
2683
                        " from the first disk's node group will be"
2684
                        " used")
2685

    
2686
    if not self.op.disk_template in constants.DTS_EXCL_STORAGE:
2687
      # Make sure none of the nodes require exclusive storage
2688
      nodes = [pnode_info]
2689
      if self.op.disk_template in constants.DTS_INT_MIRROR:
2690
        assert snode_info
2691
        nodes.append(snode_info)
2692
      has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
2693
      if compat.any(map(has_es, nodes)):
2694
        errmsg = ("Cannot convert disk template from %s to %s when exclusive"
2695
                  " storage is enabled" % (self.instance.disk_template,
2696
                                           self.op.disk_template))
2697
        raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)
2698

    
2699
  def _PreCheckDisks(self, ispec):
2700
    """CheckPrereq checks related to disk changes.
2701

2702
    @type ispec: dict
2703
    @param ispec: instance specs to be updated with the new disks
2704

2705
    """
2706
    self.diskparams = self.cfg.GetInstanceDiskParams(self.instance)
2707

    
2708
    excl_stor = compat.any(
2709
      rpc.GetExclusiveStorageForNodes(self.cfg,
2710
                                      self.instance.all_nodes).values()
2711
      )
2712

    
2713
    # Check disk modifications. This is done here and not in CheckArguments
2714
    # (as with NICs), because we need to know the instance's disk template
2715
    ver_fn = lambda op, par: self._VerifyDiskModification(op, par, excl_stor)
2716
    if self.instance.disk_template == constants.DT_EXT:
2717
      self._CheckMods("disk", self.op.disks, {}, ver_fn)
2718
    else:
2719
      self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
2720
                      ver_fn)
2721

    
2722
    self.diskmod = _PrepareContainerMods(self.op.disks, None)
2723

    
2724
    # Check the validity of the `provider' parameter
2725
    if self.instance.disk_template in constants.DT_EXT:
2726
      for mod in self.diskmod:
2727
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
2728
        if mod[0] == constants.DDM_ADD:
2729
          if ext_provider is None:
2730
            raise errors.OpPrereqError("Instance template is '%s' and parameter"
2731
                                       " '%s' missing, during disk add" %
2732
                                       (constants.DT_EXT,
2733
                                        constants.IDISK_PROVIDER),
2734
                                       errors.ECODE_NOENT)
2735
        elif mod[0] == constants.DDM_MODIFY:
2736
          if ext_provider:
2737
            raise errors.OpPrereqError("Parameter '%s' is invalid during disk"
2738
                                       " modification" %
2739
                                       constants.IDISK_PROVIDER,
2740
                                       errors.ECODE_INVAL)
2741
    else:
2742
      for mod in self.diskmod:
2743
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
2744
        if ext_provider is not None:
2745
          raise errors.OpPrereqError("Parameter '%s' is only valid for"
2746
                                     " instances of type '%s'" %
2747
                                     (constants.IDISK_PROVIDER,
2748
                                      constants.DT_EXT),
2749
                                     errors.ECODE_INVAL)
2750

    
2751
    if not self.op.wait_for_sync and self.instance.disks_active:
2752
      for mod in self.diskmod:
2753
        if mod[0] == constants.DDM_ADD:
2754
          raise errors.OpPrereqError("Can't add a disk to an instance with"
2755
                                     " activated disks and"
2756
                                     " --no-wait-for-sync given.",
2757
                                     errors.ECODE_INVAL)
2758

    
2759
    if self.op.disks and self.instance.disk_template == constants.DT_DISKLESS:
2760
      raise errors.OpPrereqError("Disk operations not supported for"
2761
                                 " diskless instances", errors.ECODE_INVAL)
2762

    
2763
    def _PrepareDiskMod(_, disk, params, __):
2764
      disk.name = params.get(constants.IDISK_NAME, None)
2765

    
2766
    # Verify disk changes (operating on a copy)
2767
    disks = copy.deepcopy(self.instance.disks)
2768
    _ApplyContainerMods("disk", disks, None, self.diskmod, None,
2769
                        _PrepareDiskMod, None)
2770
    utils.ValidateDeviceNames("disk", disks)
2771
    if len(disks) > constants.MAX_DISKS:
2772
      raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
2773
                                 " more" % constants.MAX_DISKS,
2774
                                 errors.ECODE_STATE)
2775
    disk_sizes = [disk.size for disk in self.instance.disks]
2776
    disk_sizes.extend(params["size"] for (op, idx, params, private) in
2777
                      self.diskmod if op == constants.DDM_ADD)
2778
    ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
2779
    ispec[constants.ISPEC_DISK_SIZE] = disk_sizes
2780

    
2781
    if self.op.offline is not None and self.op.offline:
2782
      CheckInstanceState(self, self.instance, CAN_CHANGE_INSTANCE_OFFLINE,
2783
                         msg="can't change to offline")
2784

    
2785
  def CheckPrereq(self):
2786
    """Check prerequisites.
2787

2788
    This only checks the instance list against the existing names.
2789

2790
    """
2791
    assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
2792
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
2793
    self.cluster = self.cfg.GetClusterInfo()
2794

    
2795
    assert self.instance is not None, \
2796
      "Cannot retrieve locked instance %s" % self.op.instance_name
2797

    
2798
    pnode_uuid = self.instance.primary_node
2799

    
2800
    self.warn = []
2801

    
2802
    if (self.op.pnode_uuid is not None and self.op.pnode_uuid != pnode_uuid and
2803
        not self.op.force):
2804
      # verify that the instance is not up
2805
      instance_info = self.rpc.call_instance_info(
2806
          pnode_uuid, self.instance.name, self.instance.hypervisor,
2807
          self.instance.hvparams)
2808
      if instance_info.fail_msg:
2809
        self.warn.append("Can't get instance runtime information: %s" %
2810
                         instance_info.fail_msg)
2811
      elif instance_info.payload:
2812
        raise errors.OpPrereqError("Instance is still running on %s" %
2813
                                   self.cfg.GetNodeName(pnode_uuid),
2814
                                   errors.ECODE_STATE)
2815

    
2816
    assert pnode_uuid in self.owned_locks(locking.LEVEL_NODE)
2817
    node_uuids = list(self.instance.all_nodes)
2818
    pnode_info = self.cfg.GetNodeInfo(pnode_uuid)
2819

    
2820
    #_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
2821
    assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
2822
    group_info = self.cfg.GetNodeGroup(pnode_info.group)
2823

    
2824
    # dictionary with instance information after the modification
2825
    ispec = {}
2826

    
2827
    # Prepare NIC modifications
2828
    self.nicmod = _PrepareContainerMods(self.op.nics, _InstNicModPrivate)
2829

    
2830
    # OS change
2831
    if self.op.os_name and not self.op.force:
2832
      CheckNodeHasOS(self, self.instance.primary_node, self.op.os_name,
2833
                     self.op.force_variant)
2834
      instance_os = self.op.os_name
2835
    else:
2836
      instance_os = self.instance.os
2837

    
2838
    assert not (self.op.disk_template and self.op.disks), \
2839
      "Can't modify disk template and apply disk changes at the same time"
2840

    
2841
    if self.op.disk_template:
2842
      self._PreCheckDiskTemplate(pnode_info)
2843

    
2844
    self._PreCheckDisks(ispec)
2845

    
2846
    # hvparams processing
2847
    if self.op.hvparams:
2848
      hv_type = self.instance.hypervisor
2849
      i_hvdict = GetUpdatedParams(self.instance.hvparams, self.op.hvparams)
2850
      utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
2851
      hv_new = self.cluster.SimpleFillHV(hv_type, self.instance.os, i_hvdict)
2852

    
2853
      # local check
2854
      hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
2855
      CheckHVParams(self, node_uuids, self.instance.hypervisor, hv_new)
2856
      self.hv_proposed = self.hv_new = hv_new # the new actual values
2857
      self.hv_inst = i_hvdict # the new dict (without defaults)
2858
    else:
2859
      self.hv_proposed = self.cluster.SimpleFillHV(self.instance.hypervisor,
2860
                                                   self.instance.os,
2861
                                                   self.instance.hvparams)
2862
      self.hv_new = self.hv_inst = {}
2863

    
2864
    # beparams processing
2865
    if self.op.beparams:
2866
      i_bedict = GetUpdatedParams(self.instance.beparams, self.op.beparams,
2867
                                  use_none=True)
2868
      objects.UpgradeBeParams(i_bedict)
2869
      utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
2870
      be_new = self.cluster.SimpleFillBE(i_bedict)
2871
      self.be_proposed = self.be_new = be_new # the new actual values
2872
      self.be_inst = i_bedict # the new dict (without defaults)
2873
    else:
2874
      self.be_new = self.be_inst = {}
2875
      self.be_proposed = self.cluster.SimpleFillBE(self.instance.beparams)
2876
    be_old = self.cluster.FillBE(self.instance)
2877

    
2878
    # CPU param validation -- checking every time a parameter is
2879
    # changed to cover all cases where either CPU mask or vcpus have
2880
    # changed
2881
    if (constants.BE_VCPUS in self.be_proposed and
2882
        constants.HV_CPU_MASK in self.hv_proposed):
2883
      cpu_list = \
2884
        utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
2885
      # Verify mask is consistent with number of vCPUs. Can skip this
2886
      # test if only 1 entry in the CPU mask, which means same mask
2887
      # is applied to all vCPUs.
2888
      if (len(cpu_list) > 1 and
2889
          len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
2890
        raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
2891
                                   " CPU mask [%s]" %
2892
                                   (self.be_proposed[constants.BE_VCPUS],
2893
                                    self.hv_proposed[constants.HV_CPU_MASK]),
2894
                                   errors.ECODE_INVAL)
2895

    
2896
      # Only perform this test if a new CPU mask is given
2897
      if constants.HV_CPU_MASK in self.hv_new:
2898
        # Calculate the largest CPU number requested
2899
        max_requested_cpu = max(map(max, cpu_list))
2900
        # Check that all of the instance's nodes have enough physical CPUs to
2901
        # satisfy the requested CPU mask
2902
        hvspecs = [(self.instance.hypervisor,
2903
                    self.cfg.GetClusterInfo()
2904
                      .hvparams[self.instance.hypervisor])]
2905
        _CheckNodesPhysicalCPUs(self, self.instance.all_nodes,
2906
                                max_requested_cpu + 1,
2907
                                hvspecs)
2908

    
2909
    # osparams processing
2910
    if self.op.osparams:
2911
      i_osdict = GetUpdatedParams(self.instance.osparams, self.op.osparams)
2912
      CheckOSParams(self, True, node_uuids, instance_os, i_osdict)
2913
      self.os_inst = i_osdict # the new dict (without defaults)
2914
    else:
2915
      self.os_inst = {}
2916

    
2917
    #TODO(dynmem): do the appropriate check involving MINMEM
2918
    if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
2919
        be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
2920
      mem_check_list = [pnode_uuid]
2921
      if be_new[constants.BE_AUTO_BALANCE]:
2922
        # either we changed auto_balance to yes or it was from before
2923
        mem_check_list.extend(self.instance.secondary_nodes)
2924
      instance_info = self.rpc.call_instance_info(
2925
          pnode_uuid, self.instance.name, self.instance.hypervisor,
2926
          self.instance.hvparams)
2927
      hvspecs = [(self.instance.hypervisor,
2928
                  self.cluster.hvparams[self.instance.hypervisor])]
2929
      nodeinfo = self.rpc.call_node_info(mem_check_list, None,
2930
                                         hvspecs)
2931
      pninfo = nodeinfo[pnode_uuid]
2932
      msg = pninfo.fail_msg
2933
      if msg:
2934
        # Assume the primary node is unreachable and go ahead
2935
        self.warn.append("Can't get info from primary node %s: %s" %
2936
                         (self.cfg.GetNodeName(pnode_uuid), msg))
2937
      else:
2938
        (_, _, (pnhvinfo, )) = pninfo.payload
2939
        if not isinstance(pnhvinfo.get("memory_free", None), int):
2940
          self.warn.append("Node data from primary node %s doesn't contain"
2941
                           " free memory information" %
2942
                           self.cfg.GetNodeName(pnode_uuid))
2943
        elif instance_info.fail_msg:
2944
          self.warn.append("Can't get instance runtime information: %s" %
2945
                           instance_info.fail_msg)
2946
        else:
2947
          if instance_info.payload:
2948
            current_mem = int(instance_info.payload["memory"])
2949
          else:
2950
            # Assume instance not running
2951
            # (there is a slight race condition here, but it's not very
2952
            # probable, and we have no other way to check)
2953
            # TODO: Describe race condition
2954
            current_mem = 0
2955
          #TODO(dynmem): do the appropriate check involving MINMEM
2956
          miss_mem = (be_new[constants.BE_MAXMEM] - current_mem -
2957
                      pnhvinfo["memory_free"])
2958
          if miss_mem > 0:
2959
            raise errors.OpPrereqError("This change will prevent the instance"
2960
                                       " from starting, due to %d MB of memory"
2961
                                       " missing on its primary node" %
2962
                                       miss_mem, errors.ECODE_NORES)
2963

    
2964
      if be_new[constants.BE_AUTO_BALANCE]:
2965
        for node_uuid, nres in nodeinfo.items():
2966
          if node_uuid not in self.instance.secondary_nodes:
2967
            continue
2968
          nres.Raise("Can't get info from secondary node %s" %
2969
                     self.cfg.GetNodeName(node_uuid), prereq=True,
2970
                     ecode=errors.ECODE_STATE)
2971
          (_, _, (nhvinfo, )) = nres.payload
2972
          if not isinstance(nhvinfo.get("memory_free", None), int):
2973
            raise errors.OpPrereqError("Secondary node %s didn't return free"
2974
                                       " memory information" %
2975
                                       self.cfg.GetNodeName(node_uuid),
2976
                                       errors.ECODE_STATE)
2977
          #TODO(dynmem): do the appropriate check involving MINMEM
2978
          elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
2979
            raise errors.OpPrereqError("This change will prevent the instance"
2980
                                       " from failover to its secondary node"
2981
                                       " %s, due to not enough memory" %
2982
                                       self.cfg.GetNodeName(node_uuid),
2983
                                       errors.ECODE_STATE)
2984

    
2985
    if self.op.runtime_mem:
2986
      remote_info = self.rpc.call_instance_info(
2987
         self.instance.primary_node, self.instance.name,
2988
         self.instance.hypervisor,
2989
         self.cluster.hvparams[self.instance.hypervisor])
2990
      remote_info.Raise("Error checking node %s" %
2991
                        self.cfg.GetNodeName(self.instance.primary_node))
2992
      if not remote_info.payload: # not running already
2993
        raise errors.OpPrereqError("Instance %s is not running" %
2994
                                   self.instance.name, errors.ECODE_STATE)
2995

    
2996
      current_memory = remote_info.payload["memory"]
2997
      if (not self.op.force and
2998
           (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
2999
            self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
3000
        raise errors.OpPrereqError("Instance %s must have memory between %d"
3001
                                   " and %d MB of memory unless --force is"
3002
                                   " given" %
3003
                                   (self.instance.name,
3004
                                    self.be_proposed[constants.BE_MINMEM],
3005
                                    self.be_proposed[constants.BE_MAXMEM]),
3006
                                   errors.ECODE_INVAL)
3007

    
3008
      delta = self.op.runtime_mem - current_memory
3009
      if delta > 0:
3010
        CheckNodeFreeMemory(
3011
            self, self.instance.primary_node,
3012
            "ballooning memory for instance %s" % self.instance.name, delta,
3013
            self.instance.hypervisor,
3014
            self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])
3015

    
3016
    # make self.cluster visible in the functions below
3017
    cluster = self.cluster
3018

    
3019
    def _PrepareNicCreate(_, params, private):
3020
      self._PrepareNicModification(params, private, None, None,
3021
                                   {}, cluster, pnode_uuid)
3022
      return (None, None)
3023

    
3024
    def _PrepareNicMod(_, nic, params, private):
3025
      self._PrepareNicModification(params, private, nic.ip, nic.network,
3026
                                   nic.nicparams, cluster, pnode_uuid)
3027
      return None
3028

    
3029
    def _PrepareNicRemove(_, params, __):
3030
      ip = params.ip
3031
      net = params.network
3032
      if net is not None and ip is not None:
3033
        self.cfg.ReleaseIp(net, ip, self.proc.GetECId())
3034

    
3035
    # Verify NIC changes (operating on copy)
3036
    nics = self.instance.nics[:]
3037
    _ApplyContainerMods("NIC", nics, None, self.nicmod,
3038
                        _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
3039
    if len(nics) > constants.MAX_NICS:
3040
      raise errors.OpPrereqError("Instance has too many network interfaces"
3041
                                 " (%d), cannot add more" % constants.MAX_NICS,
3042
                                 errors.ECODE_STATE)
3043

    
3044
    # Pre-compute NIC changes (necessary to use result in hooks)
3045
    self._nic_chgdesc = []
3046
    if self.nicmod:
3047
      # Operate on copies as this is still in prereq
3048
      nics = [nic.Copy() for nic in self.instance.nics]
3049
      _ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
3050
                          self._CreateNewNic, self._ApplyNicMods, None)
3051
      # Verify that NIC names are unique and valid
3052
      utils.ValidateDeviceNames("NIC", nics)
3053
      self._new_nics = nics
3054
      ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
3055
    else:
3056
      self._new_nics = None
3057
      ispec[constants.ISPEC_NIC_COUNT] = len(self.instance.nics)
3058

    
3059
    if not self.op.ignore_ipolicy:
3060
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
3061
                                                              group_info)
3062

    
3063
      # Fill ispec with backend parameters
3064
      ispec[constants.ISPEC_SPINDLE_USE] = \
3065
        self.be_new.get(constants.BE_SPINDLE_USE, None)
3066
      ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS,
3067
                                                         None)
3068

    
3069
      # Copy ispec to verify parameters with min/max values separately
3070
      if self.op.disk_template:
3071
        new_disk_template = self.op.disk_template
3072
      else:
3073
        new_disk_template = self.instance.disk_template
3074
      ispec_max = ispec.copy()
3075
      ispec_max[constants.ISPEC_MEM_SIZE] = \
3076
        self.be_new.get(constants.BE_MAXMEM, None)
3077
      res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,
3078
                                                     new_disk_template)
3079
      ispec_min = ispec.copy()
3080
      ispec_min[constants.ISPEC_MEM_SIZE] = \
3081
        self.be_new.get(constants.BE_MINMEM, None)
3082
      res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,
3083
                                                     new_disk_template)
3084

    
3085
      if (res_max or res_min):
3086
        # FIXME: Improve error message by including information about whether
3087
        # the upper or lower limit of the parameter fails the ipolicy.
3088
        msg = ("Instance allocation to group %s (%s) violates policy: %s" %
3089
               (group_info, group_info.name,
3090
                utils.CommaJoin(set(res_max + res_min))))
3091
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
3092

    
3093
  def _ConvertPlainToDrbd(self, feedback_fn):
3094
    """Converts an instance from plain to drbd.
3095

3096
    """
3097
    feedback_fn("Converting template to drbd")
3098
    pnode_uuid = self.instance.primary_node
3099
    snode_uuid = self.op.remote_node_uuid
3100

    
3101
    assert self.instance.disk_template == constants.DT_PLAIN
3102

    
3103
    # create a fake disk info for _GenerateDiskTemplate
3104
    disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
3105
                  constants.IDISK_VG: d.logical_id[0],
3106
                  constants.IDISK_NAME: d.name}
3107
                 for d in self.instance.disks]
3108
    new_disks = GenerateDiskTemplate(self, self.op.disk_template,
3109
                                     self.instance.uuid, pnode_uuid,
3110
                                     [snode_uuid], disk_info, None, None, 0,
3111
                                     feedback_fn, self.diskparams)
3112
    anno_disks = rpc.AnnotateDiskParams(new_disks, self.diskparams)
3113
    p_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, pnode_uuid)
3114
    s_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, snode_uuid)
3115
    info = GetInstanceInfoText(self.instance)
3116
    feedback_fn("Creating additional volumes...")
3117
    # first, create the missing data and meta devices
3118
    for disk in anno_disks:
3119
      # unfortunately this is... not too nice
3120
      CreateSingleBlockDev(self, pnode_uuid, self.instance, disk.children[1],
3121
                           info, True, p_excl_stor)
3122
      for child in disk.children:
3123
        CreateSingleBlockDev(self, snode_uuid, self.instance, child, info, True,
3124
                             s_excl_stor)
3125
    # at this stage, all new LVs have been created, we can rename the
3126
    # old ones
3127
    feedback_fn("Renaming original volumes...")
3128
    rename_list = [(o, n.children[0].logical_id)
3129
                   for (o, n) in zip(self.instance.disks, new_disks)]
3130
    result = self.rpc.call_blockdev_rename(pnode_uuid, rename_list)
3131
    result.Raise("Failed to rename original LVs")
3132

    
3133
    feedback_fn("Initializing DRBD devices...")
3134
    # all child devices are in place, we can now create the DRBD devices
3135
    try:
3136
      for disk in anno_disks:
3137
        for (node_uuid, excl_stor) in [(pnode_uuid, p_excl_stor),
3138
                                       (snode_uuid, s_excl_stor)]:
3139
          f_create = node_uuid == pnode_uuid
3140
          CreateSingleBlockDev(self, node_uuid, self.instance, disk, info,
3141
                               f_create, excl_stor)
3142
    except errors.GenericError, e:
3143
      feedback_fn("Initializing of DRBD devices failed;"
3144
                  " renaming back original volumes...")
3145
      rename_back_list = [(n.children[0], o.logical_id)
3146
                          for (n, o) in zip(new_disks, self.instance.disks)]
3147
      result = self.rpc.call_blockdev_rename(pnode_uuid, rename_back_list)
3148
      result.Raise("Failed to rename LVs back after error %s" % str(e))
3149
      raise
3150

    
3151
    # at this point, the instance has been modified
3152
    self.instance.disk_template = constants.DT_DRBD8
3153
    self.instance.disks = new_disks
3154
    self.cfg.Update(self.instance, feedback_fn)
3155

    
3156
    # Release node locks while waiting for sync
3157
    ReleaseLocks(self, locking.LEVEL_NODE)
3158

    
3159
    # disks are created, waiting for sync
3160
    disk_abort = not WaitForSync(self, self.instance,
3161
                                 oneshot=not self.op.wait_for_sync)
3162
    if disk_abort:
3163
      raise errors.OpExecError("There are some degraded disks for"
3164
                               " this instance, please cleanup manually")
3165

    
3166
    # Node resource locks will be released by caller
3167

    
3168
  def _ConvertDrbdToPlain(self, feedback_fn):
3169
    """Converts an instance from drbd to plain.
3170

3171
    """
3172
    assert len(self.instance.secondary_nodes) == 1
3173
    assert self.instance.disk_template == constants.DT_DRBD8
3174

    
3175
    pnode_uuid = self.instance.primary_node
3176
    snode_uuid = self.instance.secondary_nodes[0]
3177
    feedback_fn("Converting template to plain")
3178

    
3179
    old_disks = AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
3180
    new_disks = [d.children[0] for d in self.instance.disks]
3181

    
3182
    # copy over size, mode and name
3183
    for parent, child in zip(old_disks, new_disks):
3184
      child.size = parent.size
3185
      child.mode = parent.mode
3186
      child.name = parent.name
3187

    
3188
    # this is a DRBD disk, return its port to the pool
3189
    # NOTE: this must be done right before the call to cfg.Update!
3190
    for disk in old_disks:
3191
      tcp_port = disk.logical_id[2]
3192
      self.cfg.AddTcpUdpPort(tcp_port)
3193

    
3194
    # update instance structure
3195
    self.instance.disks = new_disks
3196
    self.instance.disk_template = constants.DT_PLAIN
3197
    _UpdateIvNames(0, self.instance.disks)
3198
    self.cfg.Update(self.instance, feedback_fn)
3199

    
3200
    # Release locks in case removing disks takes a while
3201
    ReleaseLocks(self, locking.LEVEL_NODE)
3202

    
3203
    feedback_fn("Removing volumes on the secondary node...")
3204
    for disk in old_disks:
3205
      result = self.rpc.call_blockdev_remove(snode_uuid, (disk, self.instance))
3206
      result.Warn("Could not remove block device %s on node %s,"
3207
                  " continuing anyway" %
3208
                  (disk.iv_name, self.cfg.GetNodeName(snode_uuid)),
3209
                  self.LogWarning)
3210

    
3211
    feedback_fn("Removing unneeded volumes on the primary node...")
3212
    for idx, disk in enumerate(old_disks):
3213
      meta = disk.children[1]
3214
      result = self.rpc.call_blockdev_remove(pnode_uuid, (meta, self.instance))
3215
      result.Warn("Could not remove metadata for disk %d on node %s,"
3216
                  " continuing anyway" %
3217
                  (idx, self.cfg.GetNodeName(pnode_uuid)),
3218
                  self.LogWarning)
3219

    
3220
  def _CreateNewDisk(self, idx, params, _):
3221
    """Creates a new disk.
3222

3223
    """
3224
    # add a new disk
3225
    if self.instance.disk_template in constants.DTS_FILEBASED:
3226
      (file_driver, file_path) = self.instance.disks[0].logical_id
3227
      file_path = os.path.dirname(file_path)
3228
    else:
3229
      file_driver = file_path = None
3230

    
3231
    disk = \
3232
      GenerateDiskTemplate(self, self.instance.disk_template,
3233
                           self.instance.uuid, self.instance.primary_node,
3234
                           self.instance.secondary_nodes, [params], file_path,
3235
                           file_driver, idx, self.Log, self.diskparams)[0]
3236

    
3237
    new_disks = CreateDisks(self, self.instance, disks=[disk])
3238

    
3239
    if self.cluster.prealloc_wipe_disks:
3240
      # Wipe new disk
3241
      WipeOrCleanupDisks(self, self.instance,
3242
                         disks=[(idx, disk, 0)],
3243
                         cleanup=new_disks)
3244

    
3245
    return (disk, [
3246
      ("disk/%d" % idx, "add:size=%s,mode=%s" % (disk.size, disk.mode)),
3247
      ])
3248

    
3249
  def _PostAddDisk(self, _, disk):
3250
    if not WaitForSync(self, self.instance, disks=[disk],
3251
                       oneshot=not self.op.wait_for_sync):
3252
      raise errors.OpExecError("Failed to sync disks of %s" %
3253
                               self.instance.name)
3254

    
3255
    # the disk is active at this point, so deactivate it if the instance disks
3256
    # are supposed to be inactive
3257
    if not self.instance.disks_active:
3258
      ShutdownInstanceDisks(self, self.instance, disks=[disk])
3259

    
3260
  @staticmethod
3261
  def _ModifyDisk(idx, disk, params, _):
3262
    """Modifies a disk.
3263

3264
    """
3265
    changes = []
3266
    mode = params.get(constants.IDISK_MODE, None)
3267
    if mode:
3268
      disk.mode = mode
3269
      changes.append(("disk.mode/%d" % idx, disk.mode))
3270

    
3271
    name = params.get(constants.IDISK_NAME, None)
3272
    disk.name = name
3273
    changes.append(("disk.name/%d" % idx, disk.name))
3274

    
3275
    return changes
3276

    
3277
  def _RemoveDisk(self, idx, root, _):
3278
    """Removes a disk.
3279

3280
    """
3281
    (anno_disk,) = AnnotateDiskParams(self.instance, [root], self.cfg)
3282
    for node_uuid, disk in anno_disk.ComputeNodeTree(
3283
                             self.instance.primary_node):
3284
      msg = self.rpc.call_blockdev_remove(node_uuid, (disk, self.instance)) \
3285
              .fail_msg
3286
      if msg:
3287
        self.LogWarning("Could not remove disk/%d on node '%s': %s,"
3288
                        " continuing anyway", idx,
3289
                        self.cfg.GetNodeName(node_uuid), msg)
3290

    
3291
    # if this is a DRBD disk, return its port to the pool
3292
    if root.dev_type in constants.LDS_DRBD:
3293
      self.cfg.AddTcpUdpPort(root.logical_id[2])
3294

    
3295
  def _CreateNewNic(self, idx, params, private):
3296
    """Creates data structure for a new network interface.
3297

3298
    """
3299
    mac = params[constants.INIC_MAC]
3300
    ip = params.get(constants.INIC_IP, None)
3301
    net = params.get(constants.INIC_NETWORK, None)
3302
    name = params.get(constants.INIC_NAME, None)
3303
    net_uuid = self.cfg.LookupNetwork(net)
3304
    #TODO: not private.filled?? can a nic have no nicparams??
3305
    nicparams = private.filled
3306
    nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name,
3307
                       nicparams=nicparams)
3308
    nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
3309

    
3310
    return (nobj, [
3311
      ("nic.%d" % idx,
3312
       "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
3313
       (mac, ip, private.filled[constants.NIC_MODE],
3314
       private.filled[constants.NIC_LINK],
3315
       net)),
3316
      ])
3317

    
3318
  def _ApplyNicMods(self, idx, nic, params, private):
3319
    """Modifies a network interface.
3320

3321
    """
3322
    changes = []
3323

    
3324
    for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:
3325
      if key in params:
3326
        changes.append(("nic.%s/%d" % (key, idx), params[key]))
3327
        setattr(nic, key, params[key])
3328

    
3329
    new_net = params.get(constants.INIC_NETWORK, nic.network)
3330
    new_net_uuid = self.cfg.LookupNetwork(new_net)
3331
    if new_net_uuid != nic.network:
3332
      changes.append(("nic.network/%d" % idx, new_net))
3333
      nic.network = new_net_uuid
3334

    
3335
    if private.filled:
3336
      nic.nicparams = private.filled
3337

    
3338
      for (key, val) in nic.nicparams.items():
3339
        changes.append(("nic.%s/%d" % (key, idx), val))
3340

    
3341
    return changes
3342

    
3343
  def Exec(self, feedback_fn):
3344
    """Modifies an instance.
3345

3346
    All parameters take effect only at the next restart of the instance.
3347

3348
    """
3349
    # Process here the warnings from CheckPrereq, as we don't have a
3350
    # feedback_fn there.
3351
    # TODO: Replace with self.LogWarning
3352
    for warn in self.warn:
3353
      feedback_fn("WARNING: %s" % warn)
3354

    
3355
    assert ((self.op.disk_template is None) ^
3356
            bool(self.owned_locks(locking.LEVEL_NODE_RES))), \
3357
      "Not owning any node resource locks"
3358

    
3359
    result = []
3360

    
3361
    # New primary node
3362
    if self.op.pnode_uuid:
3363
      self.instance.primary_node = self.op.pnode_uuid
3364

    
3365
    # runtime memory
3366
    if self.op.runtime_mem:
3367
      rpcres = self.rpc.call_instance_balloon_memory(self.instance.primary_node,
3368
                                                     self.instance,
3369
                                                     self.op.runtime_mem)
3370
      rpcres.Raise("Cannot modify instance runtime memory")
3371
      result.append(("runtime_memory", self.op.runtime_mem))
3372

    
3373
    # Apply disk changes
3374
    _ApplyContainerMods("disk", self.instance.disks, result, self.diskmod,
3375
                        self._CreateNewDisk, self._ModifyDisk,
3376
                        self._RemoveDisk, post_add_fn=self._PostAddDisk)
3377
    _UpdateIvNames(0, self.instance.disks)
3378

    
3379
    if self.op.disk_template:
3380
      if __debug__:
3381
        check_nodes = set(self.instance.all_nodes)
3382
        if self.op.remote_node_uuid:
3383
          check_nodes.add(self.op.remote_node_uuid)
3384
        for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
3385
          owned = self.owned_locks(level)
3386
          assert not (check_nodes - owned), \
3387
            ("Not owning the correct locks, owning %r, expected at least %r" %
3388
             (owned, check_nodes))
3389

    
3390
      r_shut = ShutdownInstanceDisks(self, self.instance)
3391
      if not r_shut:
3392
        raise errors.OpExecError("Cannot shutdown instance disks, unable to"
3393
                                 " proceed with disk template conversion")
3394
      mode = (self.instance.disk_template, self.op.disk_template)
3395
      try:
3396
        self._DISK_CONVERSIONS[mode](self, feedback_fn)
3397
      except:
3398
        self.cfg.ReleaseDRBDMinors(self.instance.uuid)
3399
        raise
3400
      result.append(("disk_template", self.op.disk_template))
3401

    
3402
      assert self.instance.disk_template == self.op.disk_template, \
3403
        ("Expected disk template '%s', found '%s'" %
3404
         (self.op.disk_template, self.instance.disk_template))
3405

    
3406
    # Release node and resource locks if there are any (they might already have
3407
    # been released during disk conversion)
3408
    ReleaseLocks(self, locking.LEVEL_NODE)
3409
    ReleaseLocks(self, locking.LEVEL_NODE_RES)
3410

    
3411
    # Apply NIC changes
3412
    if self._new_nics is not None:
3413
      self.instance.nics = self._new_nics
3414
      result.extend(self._nic_chgdesc)
3415

    
3416
    # hvparams changes
3417
    if self.op.hvparams:
3418
      self.instance.hvparams = self.hv_inst
3419
      for key, val in self.op.hvparams.iteritems():
3420
        result.append(("hv/%s" % key, val))
3421

    
3422
    # beparams changes
3423
    if self.op.beparams:
3424
      self.instance.beparams = self.be_inst
3425
      for key, val in self.op.beparams.iteritems():
3426
        result.append(("be/%s" % key, val))
3427

    
3428
    # OS change
3429
    if self.op.os_name:
3430
      self.instance.os = self.op.os_name
3431

    
3432
    # osparams changes
3433
    if self.op.osparams:
3434
      self.instance.osparams = self.os_inst
3435
      for key, val in self.op.osparams.iteritems():
3436
        result.append(("os/%s" % key, val))
3437

    
3438
    if self.op.offline is None:
3439
      # Ignore
3440
      pass
3441
    elif self.op.offline:
3442
      # Mark instance as offline
3443
      self.cfg.MarkInstanceOffline(self.instance.uuid)
3444
      result.append(("admin_state", constants.ADMINST_OFFLINE))
3445
    else:
3446
      # Mark instance as online, but stopped
3447
      self.cfg.MarkInstanceDown(self.instance.uuid)
3448
      result.append(("admin_state", constants.ADMINST_DOWN))
3449

    
3450
    self.cfg.Update(self.instance, feedback_fn, self.proc.GetECId())
3451

    
3452
    assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
3453
                self.owned_locks(locking.LEVEL_NODE)), \
3454
      "All node locks should have been released by now"
3455

    
3456
    return result
3457

    
3458
  _DISK_CONVERSIONS = {
3459
    (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
3460
    (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
3461
    }
3462

    
3463

    
3464
class LUInstanceChangeGroup(LogicalUnit):
3465
  HPATH = "instance-change-group"
3466
  HTYPE = constants.HTYPE_INSTANCE
3467
  REQ_BGL = False
3468

    
3469
  def ExpandNames(self):
3470
    self.share_locks = ShareAll()
3471

    
3472
    self.needed_locks = {
3473
      locking.LEVEL_NODEGROUP: [],
3474
      locking.LEVEL_NODE: [],
3475
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
3476
      }
3477

    
3478
    self._ExpandAndLockInstance()
3479

    
3480
    if self.op.target_groups:
3481
      self.req_target_uuids = map(self.cfg.LookupNodeGroup,
3482
                                  self.op.target_groups)
3483
    else:
3484
      self.req_target_uuids = None
3485

    
3486
    self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)
3487

    
3488
  def DeclareLocks(self, level):
3489
    if level == locking.LEVEL_NODEGROUP:
3490
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
3491

    
3492
      if self.req_target_uuids:
3493
        lock_groups = set(self.req_target_uuids)
3494

    
3495
        # Lock all groups used by instance optimistically; this requires going
3496
        # via the node before it's locked, requiring verification later on
3497
        instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
3498
        lock_groups.update(instance_groups)
3499
      else:
3500
        # No target groups, need to lock all of them
3501
        lock_groups = locking.ALL_SET
3502

    
3503
      self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups
3504

    
3505
    elif level == locking.LEVEL_NODE:
3506
      if self.req_target_uuids:
3507
        # Lock all nodes used by instances
3508
        self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
3509
        self._LockInstancesNodes()
3510

    
3511
        # Lock all nodes in all potential target groups
3512
        lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
3513
                       self.cfg.GetInstanceNodeGroups(self.op.instance_uuid))
3514
        member_nodes = [node_uuid
3515
                        for group in lock_groups
3516
                        for node_uuid in self.cfg.GetNodeGroup(group).members]
3517
        self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
3518
      else:
3519
        # Lock all nodes as all groups are potential targets
3520
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
3521

    
3522
  def CheckPrereq(self):
3523
    owned_instance_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
3524
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
3525
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
3526

    
3527
    assert (self.req_target_uuids is None or
3528
            owned_groups.issuperset(self.req_target_uuids))
3529
    assert owned_instance_names == set([self.op.instance_name])
3530

    
3531
    # Get instance information
3532
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
3533

    
3534
    # Check if node groups for locked instance are still correct
3535
    assert owned_nodes.issuperset(self.instance.all_nodes), \
3536
      ("Instance %s's nodes changed while we kept the lock" %
3537
       self.op.instance_name)
3538

    
3539
    inst_groups = CheckInstanceNodeGroups(self.cfg, self.op.instance_uuid,
3540
                                          owned_groups)
3541

    
3542
    if self.req_target_uuids:
3543
      # User requested specific target groups
3544
      self.target_uuids = frozenset(self.req_target_uuids)
3545
    else:
3546
      # All groups except those used by the instance are potential targets
3547
      self.target_uuids = owned_groups - inst_groups
3548

    
3549
    conflicting_groups = self.target_uuids & inst_groups
3550
    if conflicting_groups:
3551
      raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
3552
                                 " used by the instance '%s'" %
3553
                                 (utils.CommaJoin(conflicting_groups),
3554
                                  self.op.instance_name),
3555
                                 errors.ECODE_INVAL)
3556

    
3557
    if not self.target_uuids:
3558
      raise errors.OpPrereqError("There are no possible target groups",
3559
                                 errors.ECODE_INVAL)
3560

    
3561
  def BuildHooksEnv(self):
3562
    """Build hooks env.
3563

3564
    """
3565
    assert self.target_uuids
3566

    
3567
    env = {
3568
      "TARGET_GROUPS": " ".join(self.target_uuids),
3569
      }
3570

    
3571
    env.update(BuildInstanceHookEnvByObject(self, self.instance))
3572

    
3573
    return env
3574

    
3575
  def BuildHooksNodes(self):
3576
    """Build hooks nodes.
3577

3578
    """
3579
    mn = self.cfg.GetMasterNode()
3580
    return ([mn], [mn])
3581

    
3582
  def Exec(self, feedback_fn):
3583
    instances = list(self.owned_locks(locking.LEVEL_INSTANCE))
3584

    
3585
    assert instances == [self.op.instance_name], "Instance not locked"
3586

    
3587
    req = iallocator.IAReqGroupChange(instances=instances,
3588
                                      target_groups=list(self.target_uuids))
3589
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)
3590

    
3591
    ial.Run(self.op.iallocator)
3592

    
3593
    if not ial.success:
3594
      raise errors.OpPrereqError("Can't compute solution for changing group of"
3595
                                 " instance '%s' using iallocator '%s': %s" %
3596
                                 (self.op.instance_name, self.op.iallocator,
3597
                                  ial.info), errors.ECODE_NORES)
3598

    
3599
    jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False)
3600

    
3601
    self.LogInfo("Iallocator returned %s job(s) for changing group of"
3602
                 " instance '%s'", len(jobs), self.op.instance_name)
3603

    
3604
    return ResultWithJobs(jobs)