#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Logical units dealing with instances."""

import OpenSSL
import copy
import logging
import os

from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import ht
from ganeti import hypervisor
from ganeti import locking
from ganeti.masterd import iallocator
from ganeti import masterd
from ganeti import netutils
from ganeti import objects
from ganeti import opcodes
from ganeti import pathutils
from ganeti import rpc
from ganeti import utils

from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs

from ganeti.cmdlib.common import INSTANCE_DOWN, \
  INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, CheckNodeOnline, \
  ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
  LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
  IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
  AnnotateDiskParams, GetUpdatedParams, ExpandInstanceUuidAndName, \
  ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeUuidAndName, \
  CheckDiskTemplateEnabled
from ganeti.cmdlib.instance_storage import CreateDisks, \
  CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, WaitForSync, \
  IsExclusiveStorageEnabledNodeUuid, CreateSingleBlockDev, ComputeDisks, \
  CheckRADOSFreeSpace, ComputeDiskSizePerVG, GenerateDiskTemplate, \
  StartInstanceDisks, ShutdownInstanceDisks, AssembleInstanceDisks, \
  CheckSpindlesExclusiveStorage
from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
  GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
  NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
  ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
  GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
  CheckInstanceBridgesExist, CheckNicsBridgesExist, CheckNodeHasOS

import ganeti.masterd.instance


#: Type description for changes as returned by L{_ApplyContainerMods}'s
#: callbacks
_TApplyContModsCbChanges = \
  ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
    ht.TNonEmptyString,
    ht.TAny,
    ])))
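# For illustration only (the pairs below are made up): a value accepted by
# this check is either None or a list of two-element tuples whose first
# element is a non-empty string, e.g. [("nic.0", "add"), ("disk/1", "remove")].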


def _DeviceHotplugable(dev):
  """Check whether a device can be hotplugged (only devices with a UUID can).

  """
  return dev.uuid is not None


def _CheckHostnameSane(lu, name):
  """Ensures that a given hostname resolves to a 'sane' name.

  The given name is required to be a prefix of the resolved hostname,
  to prevent accidental mismatches.

  @param lu: the logical unit on behalf of which we're checking
  @param name: the name we should resolve and check
  @return: the resolved hostname object

  """
  hostname = netutils.GetHostname(name=name)
  if hostname.name != name:
    lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name)
  if not utils.MatchNameComponent(name, [hostname.name]):
    raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
                                " same as given hostname '%s'") %
                               (hostname.name, name), errors.ECODE_INVAL)
  return hostname
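# Illustrative behaviour (hypothetical names): a request for "instance1" that
# resolves to "instance1.example.com" is accepted, since the given name is a
# component prefix of the resolved one; a resolution to "other.example.com"
# would be rejected by the MatchNameComponent check above.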


def _CheckOpportunisticLocking(op):
  """Generate error if opportunistic locking is not possible.

  """
  if op.opportunistic_locking and not op.iallocator:
    raise errors.OpPrereqError("Opportunistic locking is only available in"
                               " combination with an instance allocator",
                               errors.ECODE_INVAL)


def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_name_whitelist):
  """Wrapper around IAReqInstanceAlloc.

  @param op: The instance opcode
  @param disks: The computed disks
  @param nics: The computed nics
  @param beparams: The fully filled beparams
  @param node_name_whitelist: List of nodes which should appear as online to the
    allocator (unless the node is already marked offline)

  @returns: A filled L{iallocator.IAReqInstanceAlloc}

  """
  spindle_use = beparams[constants.BE_SPINDLE_USE]
  return iallocator.IAReqInstanceAlloc(name=op.instance_name,
                                       disk_template=op.disk_template,
                                       tags=op.tags,
                                       os=op.os_type,
                                       vcpus=beparams[constants.BE_VCPUS],
                                       memory=beparams[constants.BE_MAXMEM],
                                       spindle_use=spindle_use,
                                       disks=disks,
                                       nics=[n.ToDict() for n in nics],
                                       hypervisor=op.hypervisor,
                                       node_whitelist=node_name_whitelist)


def _ComputeFullBeParams(op, cluster):
  """Computes the full beparams.

  @param op: The instance opcode
  @param cluster: The cluster config object

  @return: The fully filled beparams

  """
  default_beparams = cluster.beparams[constants.PP_DEFAULT]
  for param, value in op.beparams.iteritems():
    if value == constants.VALUE_AUTO:
      op.beparams[param] = default_beparams[param]
  objects.UpgradeBeParams(op.beparams)
  utils.ForceDictType(op.beparams, constants.BES_PARAMETER_TYPES)
  return cluster.SimpleFillBE(op.beparams)
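# Illustrative example (values made up): with cluster defaults of
# {"vcpus": 1, "maxmem": 128} and op.beparams == {"vcpus": "auto",
# "maxmem": 512}, the "auto" entry is replaced by the cluster default,
# UpgradeBeParams converts any legacy "memory" key, and SimpleFillBE merges
# in the remaining defaults, yielding the fully filled dict.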


def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
  """Computes the nics.

  @param op: The instance opcode
  @param cluster: Cluster configuration object
  @param default_ip: The default ip to assign
  @param cfg: An instance of the configuration object
  @param ec_id: Execution context ID

  @returns: The built up nics

  """
  nics = []
  for nic in op.nics:
    nic_mode_req = nic.get(constants.INIC_MODE, None)
    nic_mode = nic_mode_req
    if nic_mode is None or nic_mode == constants.VALUE_AUTO:
      nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]

    net = nic.get(constants.INIC_NETWORK, None)
    link = nic.get(constants.NIC_LINK, None)
    ip = nic.get(constants.INIC_IP, None)
    vlan = nic.get(constants.INIC_VLAN, None)

    if net is None or net.lower() == constants.VALUE_NONE:
      net = None
    else:
      if nic_mode_req is not None or link is not None:
        raise errors.OpPrereqError("If network is given, no mode or link"
                                   " is allowed to be passed",
                                   errors.ECODE_INVAL)

    if vlan is not None and nic_mode != constants.NIC_MODE_OVS:
      raise errors.OpPrereqError("VLAN is given, but network mode is not"
                                 " openvswitch", errors.ECODE_INVAL)

    # ip validity checks
    if ip is None or ip.lower() == constants.VALUE_NONE:
      nic_ip = None
    elif ip.lower() == constants.VALUE_AUTO:
      if not op.name_check:
        raise errors.OpPrereqError("IP address set to auto but name checks"
                                   " have been skipped",
                                   errors.ECODE_INVAL)
      nic_ip = default_ip
    else:
      # We defer pool operations until later, so that the iallocator has
      # filled in the instance's node(s)
      if ip.lower() == constants.NIC_IP_POOL:
        if net is None:
          raise errors.OpPrereqError("if ip=pool, parameter network"
                                     " must be passed too",
                                     errors.ECODE_INVAL)

      elif not netutils.IPAddress.IsValid(ip):
        raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                   errors.ECODE_INVAL)

      nic_ip = ip

    # TODO: check the ip address for uniqueness
    if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
      raise errors.OpPrereqError("Routed nic mode requires an ip address",
                                 errors.ECODE_INVAL)

    # MAC address verification
    mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
    if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
      mac = utils.NormalizeAndValidateMac(mac)

      try:
        # TODO: We need to factor this out
        cfg.ReserveMAC(mac, ec_id)
      except errors.ReservationError:
        raise errors.OpPrereqError("MAC address %s already in use"
                                   " in cluster" % mac,
                                   errors.ECODE_NOTUNIQUE)

    #  Build nic parameters
    nicparams = {}
    if nic_mode_req:
      nicparams[constants.NIC_MODE] = nic_mode
    if link:
      nicparams[constants.NIC_LINK] = link
    if vlan:
      nicparams[constants.NIC_VLAN] = vlan

    check_params = cluster.SimpleFillNIC(nicparams)
    objects.NIC.CheckParameterSyntax(check_params)
    net_uuid = cfg.LookupNetwork(net)
    name = nic.get(constants.INIC_NAME, None)
    if name is not None and name.lower() == constants.VALUE_NONE:
      name = None
    nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name,
                          network=net_uuid, nicparams=nicparams)
    nic_obj.uuid = cfg.GenerateUniqueID(ec_id)
    nics.append(nic_obj)

  return nics
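# Illustrative note (names made up): a NIC spec such as
# {"ip": "pool", "network": "net1"} leaves this function with its IP still
# set to "pool"; the actual address is only drawn from the network's pool in
# CheckPrereq, once the iallocator has chosen the primary node and therefore
# the node group whose netparams apply.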


def _CheckForConflictingIp(lu, ip, node_uuid):
  """Raise an error in case of a conflicting IP address.

  @type ip: string
  @param ip: IP address
  @type node_uuid: string
  @param node_uuid: node UUID

  """
  (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node_uuid)
  if conf_net is not None:
    raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
                                " network %s, but the target NIC does not." %
                                (ip, conf_net)),
                               errors.ECODE_STATE)

  return (None, None)


def _ComputeIPolicyInstanceSpecViolation(
  ipolicy, instance_spec, disk_template,
  _compute_fn=ComputeIPolicySpecViolation):
  """Compute if the instance spec meets the specs of the ipolicy.

  @type ipolicy: dict
  @param ipolicy: The ipolicy to verify against
  @type instance_spec: dict
  @param instance_spec: The instance spec to verify
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{ComputeIPolicySpecViolation}

  """
  mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
  cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
  disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
  disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
  nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
  spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)

  return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
                     disk_sizes, spindle_use, disk_template)


def _CheckOSVariant(os_obj, name):
  """Check whether an OS name conforms to the os variants specification.

  @type os_obj: L{objects.OS}
  @param os_obj: OS object to check
  @type name: string
  @param name: OS name passed by the user, to check for validity

  """
  variant = objects.OS.GetVariant(name)
  if not os_obj.supported_variants:
    if variant:
      raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
                                 " passed)" % (os_obj.name, variant),
                                 errors.ECODE_INVAL)
    return
  if not variant:
    raise errors.OpPrereqError("OS name must include a variant",
                               errors.ECODE_INVAL)

  if variant not in os_obj.supported_variants:
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)


class LUInstanceCreate(LogicalUnit):
  """Create an instance.

  """
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def _CheckDiskTemplateValid(self):
    """Checks validity of disk template.

    """
    cluster = self.cfg.GetClusterInfo()
    if self.op.disk_template is None:
      # FIXME: It would be better to take the default disk template from the
      # ipolicy, but for the ipolicy we need the primary node, which we get from
      # the iallocator, which wants the disk template as input. To solve this
      # chicken-and-egg problem, it should be possible to specify just a node
      # group from the iallocator and take the ipolicy from that.
      self.op.disk_template = cluster.enabled_disk_templates[0]
    CheckDiskTemplateEnabled(cluster, self.op.disk_template)

  def _CheckDiskArguments(self):
    """Checks validity of disk-related arguments.

    """
    # check that disks' names are unique and valid
    utils.ValidateDeviceNames("disk", self.op.disks)

    self._CheckDiskTemplateValid()

    # check disks' parameter names and consistent adopt/no-adopt strategy
    has_adopt = has_no_adopt = False
    for disk in self.op.disks:
      if self.op.disk_template != constants.DT_EXT:
        utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
      if constants.IDISK_ADOPT in disk:
        has_adopt = True
      else:
        has_no_adopt = True
    if has_adopt and has_no_adopt:
      raise errors.OpPrereqError("Either all disks are adopted or none is",
                                 errors.ECODE_INVAL)
    if has_adopt:
      if self.op.disk_template not in constants.DTS_MAY_ADOPT:
        raise errors.OpPrereqError("Disk adoption is not supported for the"
                                   " '%s' disk template" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)
      if self.op.iallocator is not None:
        raise errors.OpPrereqError("Disk adoption not allowed with an"
                                   " iallocator script", errors.ECODE_INVAL)
      if self.op.mode == constants.INSTANCE_IMPORT:
        raise errors.OpPrereqError("Disk adoption not allowed for"
                                   " instance import", errors.ECODE_INVAL)
    else:
      if self.op.disk_template in constants.DTS_MUST_ADOPT:
        raise errors.OpPrereqError("Disk template %s requires disk adoption,"
                                   " but no 'adopt' parameter given" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)

    self.adopt_disks = has_adopt

  def _CheckVLANArguments(self):
    """Check validity of VLANs if given.

    """
    for nic in self.op.nics:
      vlan = nic.get(constants.INIC_VLAN, None)
      if vlan:
        if vlan[0] == ".":
          # vlan starting with dot means single untagged vlan,
          # might be followed by trunk (:)
          if not vlan[1:].isdigit():
            vlanlist = vlan[1:].split(':')
            for vl in vlanlist:
              if not vl.isdigit():
                raise errors.OpPrereqError("Specified VLAN parameter is "
                                           "invalid : %s" % vlan,
                                           errors.ECODE_INVAL)
        elif vlan[0] == ":":
          # Trunk - tagged only
          vlanlist = vlan[1:].split(':')
          for vl in vlanlist:
            if not vl.isdigit():
              raise errors.OpPrereqError("Specified VLAN parameter is invalid"
                                         " : %s" % vlan, errors.ECODE_INVAL)
        elif vlan.isdigit():
          # This is the simplest case. No dots, only single digit
          # -> Create untagged access port, dot needs to be added
          nic[constants.INIC_VLAN] = "." + vlan
        else:
          raise errors.OpPrereqError("Specified VLAN parameter is invalid"
                                       " : %s" % vlan, errors.ECODE_INVAL)

  def CheckArguments(self):
    """Check arguments.

    """
    # do not require name_check to ease forward/backward compatibility
    # for tools
    if self.op.no_install and self.op.start:
      self.LogInfo("No-installation mode selected, disabling startup")
      self.op.start = False
    # validate/normalize the instance name
    self.op.instance_name = \
      netutils.Hostname.GetNormalizedName(self.op.instance_name)

    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("Cannot do IP address check without a name"
                                 " check", errors.ECODE_INVAL)

    # check nics' parameter names
    for nic in self.op.nics:
      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
    # check that NICs' parameter names are unique and valid
    utils.ValidateDeviceNames("NIC", self.op.nics)

    self._CheckVLANArguments()

    self._CheckDiskArguments()

    # instance name verification
    if self.op.name_check:
      self.hostname = _CheckHostnameSane(self, self.op.instance_name)
      self.op.instance_name = self.hostname.name
      # used in CheckPrereq for ip ping check
      self.check_ip = self.hostname.ip
    else:
      self.check_ip = None

    # file storage checks
    if (self.op.file_driver and
        not self.op.file_driver in constants.FILE_DRIVER):
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                 self.op.file_driver, errors.ECODE_INVAL)

    ### Node/iallocator related checks
    CheckIAllocatorOrNode(self, "iallocator", "pnode")

    if self.op.pnode is not None:
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        if self.op.snode is None:
          raise errors.OpPrereqError("The networked disk templates need"
                                     " a mirror node", errors.ECODE_INVAL)
      elif self.op.snode:
        self.LogWarning("Secondary node will be ignored on non-mirrored disk"
                        " template")
        self.op.snode = None

    _CheckOpportunisticLocking(self.op)

    self._cds = GetClusterDomainSecret()

    if self.op.mode == constants.INSTANCE_IMPORT:
      # On import force_variant must be True, because if we forced it at
      # initial install, our only chance when importing it back is that it
      # works again!
      self.op.force_variant = True

      if self.op.no_install:
        self.LogInfo("No-installation mode has no effect during import")

    elif self.op.mode == constants.INSTANCE_CREATE:
      if self.op.os_type is None:
        raise errors.OpPrereqError("No guest OS specified",
                                   errors.ECODE_INVAL)
      if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
        raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
                                   " installation" % self.op.os_type,
                                   errors.ECODE_STATE)
      if self.op.disk_template is None:
        raise errors.OpPrereqError("No disk template specified",
                                   errors.ECODE_INVAL)

    elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
      # Check handshake to ensure both clusters have the same domain secret
      src_handshake = self.op.source_handshake
      if not src_handshake:
        raise errors.OpPrereqError("Missing source handshake",
                                   errors.ECODE_INVAL)

      errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
                                                           src_handshake)
      if errmsg:
        raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
                                   errors.ECODE_INVAL)

      # Load and check source CA
      self.source_x509_ca_pem = self.op.source_x509_ca
      if not self.source_x509_ca_pem:
        raise errors.OpPrereqError("Missing source X509 CA",
                                   errors.ECODE_INVAL)

      try:
        (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
                                                    self._cds)
      except OpenSSL.crypto.Error, err:
        raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
                                   (err, ), errors.ECODE_INVAL)

      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
      if errcode is not None:
        raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
                                   errors.ECODE_INVAL)

      self.source_x509_ca = cert

      src_instance_name = self.op.source_instance_name
      if not src_instance_name:
        raise errors.OpPrereqError("Missing source instance name",
                                   errors.ECODE_INVAL)

      self.source_instance_name = \
        netutils.GetHostname(name=src_instance_name).name

    else:
      raise errors.OpPrereqError("Invalid instance creation mode %r" %
                                 self.op.mode, errors.ECODE_INVAL)

  def ExpandNames(self):
    """ExpandNames for CreateInstance.

    Figure out the right locks for instance creation.

    """
    self.needed_locks = {}

    # this is just a preventive check, but someone might still add this
    # instance in the meantime, and creation will fail at lock-add time
    if self.op.instance_name in \
      [inst.name for inst in self.cfg.GetAllInstancesInfo().values()]:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 self.op.instance_name, errors.ECODE_EXISTS)

    self.add_locks[locking.LEVEL_INSTANCE] = self.op.instance_name

    if self.op.iallocator:
      # TODO: Find a solution to not lock all nodes in the cluster, e.g. by
      # specifying a group on instance creation and then selecting nodes from
      # that group
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
      self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET

      if self.op.opportunistic_locking:
        self.opportunistic_locks[locking.LEVEL_NODE] = True
        self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
    else:
      (self.op.pnode_uuid, self.op.pnode) = \
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
      nodelist = [self.op.pnode_uuid]
      if self.op.snode is not None:
        (self.op.snode_uuid, self.op.snode) = \
          ExpandNodeUuidAndName(self.cfg, self.op.snode_uuid, self.op.snode)
        nodelist.append(self.op.snode_uuid)
      self.needed_locks[locking.LEVEL_NODE] = nodelist

    # in case of import lock the source node too
    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = self.op.src_node
      src_path = self.op.src_path

      if src_path is None:
        self.op.src_path = src_path = self.op.instance_name

      if src_node is None:
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
        self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
        self.op.src_node = None
        if os.path.isabs(src_path):
          raise errors.OpPrereqError("Importing an instance from a path"
                                     " requires a source node option",
                                     errors.ECODE_INVAL)
      else:
        (self.op.src_node_uuid, self.op.src_node) = (_, src_node) = \
          ExpandNodeUuidAndName(self.cfg, self.op.src_node_uuid, src_node)
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
          self.needed_locks[locking.LEVEL_NODE].append(self.op.src_node_uuid)
        if not os.path.isabs(src_path):
          self.op.src_path = src_path = \
            utils.PathJoin(pathutils.EXPORT_DIR, src_path)

    self.needed_locks[locking.LEVEL_NODE_RES] = \
      CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def _RunAllocator(self):
    """Run the allocator based on input opcode.

    """
    if self.op.opportunistic_locking:
      # Only consider nodes for which a lock is held
      node_name_whitelist = self.cfg.GetNodeNames(
        self.owned_locks(locking.LEVEL_NODE))
    else:
      node_name_whitelist = None

    #TODO Export network to iallocator so that it chooses a pnode
    #     in a nodegroup that has the desired network connected to
    req = _CreateInstanceAllocRequest(self.op, self.disks,
                                      self.nics, self.be_full,
                                      node_name_whitelist)
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      # When opportunistic locks are used only a temporary failure is generated
      if self.op.opportunistic_locking:
        ecode = errors.ECODE_TEMP_NORES
      else:
        ecode = errors.ECODE_NORES

      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" %
                                 (self.op.iallocator, ial.info),
                                 ecode)

    (self.op.pnode_uuid, self.op.pnode) = \
      ExpandNodeUuidAndName(self.cfg, None, ial.result[0])
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                 self.op.instance_name, self.op.iallocator,
                 utils.CommaJoin(ial.result))

    assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator"

    if req.RequiredNodes() == 2:
      (self.op.snode_uuid, self.op.snode) = \
        ExpandNodeUuidAndName(self.cfg, None, ial.result[1])
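  # Illustrative note: on success, ial.result is a list of node names chosen
  # by the allocator: the primary node first and, when the disk template is
  # mirrored (req.RequiredNodes() == 2), the secondary node second.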

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "ADD_MODE": self.op.mode,
      }
    if self.op.mode == constants.INSTANCE_IMPORT:
      env["SRC_NODE"] = self.op.src_node
      env["SRC_PATH"] = self.op.src_path
      env["SRC_IMAGES"] = self.src_images

    env.update(BuildInstanceHookEnv(
      name=self.op.instance_name,
      primary_node_name=self.op.pnode,
      secondary_node_names=self.cfg.GetNodeNames(self.secondaries),
      status=self.op.start,
      os_type=self.op.os_type,
      minmem=self.be_full[constants.BE_MINMEM],
      maxmem=self.be_full[constants.BE_MAXMEM],
      vcpus=self.be_full[constants.BE_VCPUS],
      nics=NICListToTuple(self, self.nics),
      disk_template=self.op.disk_template,
      disks=[(d[constants.IDISK_NAME], d.get("uuid", ""),
              d[constants.IDISK_SIZE], d[constants.IDISK_MODE])
             for d in self.disks],
      bep=self.be_full,
      hvp=self.hv_full,
      hypervisor_name=self.op.hypervisor,
      tags=self.op.tags,
      ))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode(), self.op.pnode_uuid] + self.secondaries
    return nl, nl

  def _ReadExportInfo(self):
    """Reads the export information from disk.

    It will override the opcode source node and path with the actual
    information, if these two were not specified before.

    @return: the export information

    """
    assert self.op.mode == constants.INSTANCE_IMPORT

    if self.op.src_node_uuid is None:
      locked_nodes = self.owned_locks(locking.LEVEL_NODE)
      exp_list = self.rpc.call_export_list(locked_nodes)
      found = False
      for node in exp_list:
        if exp_list[node].fail_msg:
          continue
        if self.op.src_path in exp_list[node].payload:
          found = True
          self.op.src_node = node
          self.op.src_node_uuid = self.cfg.GetNodeInfoByName(node).uuid
          self.op.src_path = utils.PathJoin(pathutils.EXPORT_DIR,
                                            self.op.src_path)
          break
      if not found:
        raise errors.OpPrereqError("No export found for relative path %s" %
                                   self.op.src_path, errors.ECODE_INVAL)

    CheckNodeOnline(self, self.op.src_node_uuid)
    result = self.rpc.call_export_info(self.op.src_node_uuid, self.op.src_path)
    result.Raise("No export or invalid export found in dir %s" %
                 self.op.src_path)

    export_info = objects.SerializableConfigParser.Loads(str(result.payload))
    if not export_info.has_section(constants.INISECT_EXP):
      raise errors.ProgrammerError("Corrupted export config",
                                   errors.ECODE_ENVIRON)

    ei_version = export_info.get(constants.INISECT_EXP, "version")
    if int(ei_version) != constants.EXPORT_VERSION:
      raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                 (ei_version, constants.EXPORT_VERSION),
                                 errors.ECODE_ENVIRON)
    return export_info

  def _ReadExportParams(self, einfo):
    """Use export parameters as defaults.

    In case the opcode doesn't specify (as in override) some instance
    parameters, then try to use them from the export information, if
    that declares them.

    """
    self.op.os_type = einfo.get(constants.INISECT_EXP, "os")

    if not self.op.disks:
      disks = []
      # TODO: import the disk iv_name too
      for idx in range(constants.MAX_DISKS):
        if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
          disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
          disks.append({constants.IDISK_SIZE: disk_sz})
      self.op.disks = disks
      if not disks and self.op.disk_template != constants.DT_DISKLESS:
        raise errors.OpPrereqError("No disk info specified and the export"
                                   " is missing the disk information",
                                   errors.ECODE_INVAL)

    if not self.op.nics:
      nics = []
      for idx in range(constants.MAX_NICS):
        if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
          ndict = {}
          for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
            v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
            ndict[name] = v
          nics.append(ndict)
        else:
          break
      self.op.nics = nics

    if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
      self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()

    if (self.op.hypervisor is None and
        einfo.has_option(constants.INISECT_INS, "hypervisor")):
      self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")

    if einfo.has_section(constants.INISECT_HYP):
      # use the export parameters but do not override the ones
      # specified by the user
      for name, value in einfo.items(constants.INISECT_HYP):
        if name not in self.op.hvparams:
          self.op.hvparams[name] = value

    if einfo.has_section(constants.INISECT_BEP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_BEP):
        if name not in self.op.beparams:
          self.op.beparams[name] = value
        # Compatibility for the old "memory" be param
        if name == constants.BE_MEMORY:
          if constants.BE_MAXMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MAXMEM] = value
          if constants.BE_MINMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MINMEM] = value
    else:
      # try to read the parameters old style, from the main section
      for name in constants.BES_PARAMETERS:
        if (name not in self.op.beparams and
            einfo.has_option(constants.INISECT_INS, name)):
          self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)

    if einfo.has_section(constants.INISECT_OSP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_OSP):
        if name not in self.op.osparams:
          self.op.osparams[name] = value

    
824
  def _RevertToDefaults(self, cluster):
825
    """Revert the instance parameters to the default values.
826

827
    """
828
    # hvparams
829
    hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
830
    for name in self.op.hvparams.keys():
831
      if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
832
        del self.op.hvparams[name]
833
    # beparams
834
    be_defs = cluster.SimpleFillBE({})
835
    for name in self.op.beparams.keys():
836
      if name in be_defs and be_defs[name] == self.op.beparams[name]:
837
        del self.op.beparams[name]
838
    # nic params
839
    nic_defs = cluster.SimpleFillNIC({})
840
    for nic in self.op.nics:
841
      for name in constants.NICS_PARAMETERS:
842
        if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
843
          del nic[name]
844
    # osparams
845
    os_defs = cluster.SimpleFillOS(self.op.os_type, {})
846
    for name in self.op.osparams.keys():
847
      if name in os_defs and os_defs[name] == self.op.osparams[name]:
848
        del self.op.osparams[name]
849

    
850
  def _CalculateFileStorageDir(self):
851
    """Calculate final instance file storage dir.
852

853
    """
854
    # file storage dir calculation/check
855
    self.instance_file_storage_dir = None
856
    if self.op.disk_template in constants.DTS_FILEBASED:
857
      # build the full file storage dir path
858
      joinargs = []
859

    
860
      if self.op.disk_template == constants.DT_SHARED_FILE:
861
        get_fsd_fn = self.cfg.GetSharedFileStorageDir
862
      else:
863
        get_fsd_fn = self.cfg.GetFileStorageDir
864

    
865
      cfg_storagedir = get_fsd_fn()
866
      if not cfg_storagedir:
867
        raise errors.OpPrereqError("Cluster file storage dir not defined",
868
                                   errors.ECODE_STATE)
869
      joinargs.append(cfg_storagedir)
870

    
871
      if self.op.file_storage_dir is not None:
872
        joinargs.append(self.op.file_storage_dir)
873

    
874
      joinargs.append(self.op.instance_name)
875

    
876
      # pylint: disable=W0142
877
      self.instance_file_storage_dir = utils.PathJoin(*joinargs)
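  # Illustrative result (paths made up): with a cluster file storage dir of
  # "/srv/ganeti/file-storage", op.file_storage_dir set to "mysubdir" and the
  # instance named "inst1.example.com", the disks end up under
  # "/srv/ganeti/file-storage/mysubdir/inst1.example.com".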

  def CheckPrereq(self): # pylint: disable=R0914
    """Check prerequisites.

    """
    self._CalculateFileStorageDir()

    if self.op.mode == constants.INSTANCE_IMPORT:
      export_info = self._ReadExportInfo()
      self._ReadExportParams(export_info)
      self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
    else:
      self._old_instance_name = None

    if (not self.cfg.GetVGName() and
        self.op.disk_template not in constants.DTS_NOT_LVM):
      raise errors.OpPrereqError("Cluster does not support lvm-based"
                                 " instances", errors.ECODE_STATE)

    if (self.op.hypervisor is None or
        self.op.hypervisor == constants.VALUE_AUTO):
      self.op.hypervisor = self.cfg.GetHypervisorType()

    cluster = self.cfg.GetClusterInfo()
    enabled_hvs = cluster.enabled_hypervisors
    if self.op.hypervisor not in enabled_hvs:
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
                                 " cluster (%s)" %
                                 (self.op.hypervisor, ",".join(enabled_hvs)),
                                 errors.ECODE_STATE)

    # Check tag validity
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)

    # check hypervisor parameter syntax (locally)
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
    filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
                                      self.op.hvparams)
    hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
    hv_type.CheckParameterSyntax(filled_hvp)
    self.hv_full = filled_hvp
    # check that we don't specify global parameters on an instance
    CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
                         "instance", "cluster")

    # fill and remember the beparams dict
    self.be_full = _ComputeFullBeParams(self.op, cluster)

    # build os parameters
    self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)

    # now that hvp/bep are in final format, let's reset to defaults,
    # if told to do so
    if self.op.identify_defaults:
      self._RevertToDefaults(cluster)

    # NIC buildup
    self.nics = _ComputeNics(self.op, cluster, self.check_ip, self.cfg,
                             self.proc.GetECId())

    # disk checks/pre-build
    default_vg = self.cfg.GetVGName()
    self.disks = ComputeDisks(self.op, default_vg)

    if self.op.mode == constants.INSTANCE_IMPORT:
      disk_images = []
      for idx in range(len(self.disks)):
        option = "disk%d_dump" % idx
        if export_info.has_option(constants.INISECT_INS, option):
          # FIXME: are the old os-es, disk sizes, etc. useful?
          export_name = export_info.get(constants.INISECT_INS, option)
          image = utils.PathJoin(self.op.src_path, export_name)
          disk_images.append(image)
        else:
          disk_images.append(False)

      self.src_images = disk_images

      if self.op.instance_name == self._old_instance_name:
        for idx, nic in enumerate(self.nics):
          if nic.mac == constants.VALUE_AUTO:
            nic_mac_ini = "nic%d_mac" % idx
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)

    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT

    # ip ping checks (we use the same ip that was resolved in ExpandNames)
    if self.op.ip_check:
      if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (self.check_ip, self.op.instance_name),
                                   errors.ECODE_NOTUNIQUE)

    #### mac address generation
    # By generating here the mac address both the allocator and the hooks get
    # the real final mac address rather than the 'auto' or 'generate' value.
    # There is a race condition between the generation and the instance object
    # creation, which means that we know the mac is valid now, but we're not
    # sure it will be when we actually add the instance. If things go bad
    # adding the instance will abort because of a duplicate mac, and the
    # creation job will fail.
    for nic in self.nics:
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId())

    #### allocator run

    if self.op.iallocator is not None:
      self._RunAllocator()

    # Release all unneeded node locks
    keep_locks = filter(None, [self.op.pnode_uuid, self.op.snode_uuid,
                               self.op.src_node_uuid])
    ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES)), \
      "Node locks differ from node resource locks"

    #### node related checks

    # check primary node
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode_uuid)
    assert self.pnode is not None, \
      "Cannot retrieve locked node %s" % self.op.pnode_uuid
    if pnode.offline:
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if pnode.drained:
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if not pnode.vm_capable:
      raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
                                 " '%s'" % pnode.name, errors.ECODE_STATE)

    self.secondaries = []

    # Fill in any IPs from IP pools. This must happen here, because we need to
    # know the nic's primary node, as specified by the iallocator
    for idx, nic in enumerate(self.nics):
      net_uuid = nic.network
      if net_uuid is not None:
        nobj = self.cfg.GetNetwork(net_uuid)
        netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.uuid)
        if netparams is None:
          raise errors.OpPrereqError("No netparams found for network"
                                     " %s. Probably not connected to"
                                     " node's %s nodegroup" %
                                     (nobj.name, self.pnode.name),
                                     errors.ECODE_INVAL)
        self.LogInfo("NIC/%d inherits netparams %s" %
                     (idx, netparams.values()))
        nic.nicparams = dict(netparams)
        if nic.ip is not None:
          if nic.ip.lower() == constants.NIC_IP_POOL:
            try:
              nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
                                         " from the address pool" % idx,
                                         errors.ECODE_STATE)
            self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name)
          else:
            try:
              self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("IP address %s already in use"
                                         " or does not belong to network %s" %
                                         (nic.ip, nobj.name),
                                         errors.ECODE_NOTUNIQUE)

      # net is None, ip None or given
      elif self.op.conflicts_check:
        _CheckForConflictingIp(self, nic.ip, self.pnode.uuid)

    # mirror node verification
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.snode_uuid == pnode.uuid:
        raise errors.OpPrereqError("The secondary node cannot be the"
                                   " primary node", errors.ECODE_INVAL)
      CheckNodeOnline(self, self.op.snode_uuid)
      CheckNodeNotDrained(self, self.op.snode_uuid)
      CheckNodeVmCapable(self, self.op.snode_uuid)
      self.secondaries.append(self.op.snode_uuid)

      snode = self.cfg.GetNodeInfo(self.op.snode_uuid)
      if pnode.group != snode.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    nodes = [pnode]
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      nodes.append(snode)
    has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
    excl_stor = compat.any(map(has_es, nodes))
    if excl_stor and not self.op.disk_template in constants.DTS_EXCL_STORAGE:
      raise errors.OpPrereqError("Disk template %s not supported with"
                                 " exclusive storage" % self.op.disk_template,
                                 errors.ECODE_STATE)
    for disk in self.disks:
      CheckSpindlesExclusiveStorage(disk, excl_stor, True)

    node_uuids = [pnode.uuid] + self.secondaries

    if not self.adopt_disks:
      if self.op.disk_template == constants.DT_RBD:
        # _CheckRADOSFreeSpace() is just a placeholder.
        # Any function that checks prerequisites can be placed here.
        # Check if there is enough space on the RADOS cluster.
        CheckRADOSFreeSpace()
      elif self.op.disk_template == constants.DT_EXT:
        # FIXME: Function that checks prereqs if needed
        pass
      elif self.op.disk_template in utils.GetLvmDiskTemplates():
        # Check lv size requirements, if not adopting
        req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
        CheckNodesFreeDiskPerVG(self, node_uuids, req_sizes)
      else:
        # FIXME: add checks for other, non-adopting, non-lvm disk templates
        pass

    elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
      all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
                                disk[constants.IDISK_ADOPT])
                     for disk in self.disks])
      if len(all_lvs) != len(self.disks):
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
                                   errors.ECODE_INVAL)
      for lv_name in all_lvs:
        try:
          # FIXME: lv_name here is "vg/lv"; need to ensure that other calls
          # to ReserveLV use the same syntax
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("LV named %s used by another instance" %
                                     lv_name, errors.ECODE_NOTUNIQUE)

      vg_names = self.rpc.call_vg_list([pnode.uuid])[pnode.uuid]
      vg_names.Raise("Cannot get VG information from node %s" % pnode.name)

      node_lvs = self.rpc.call_lv_list([pnode.uuid],
                                       vg_names.payload.keys())[pnode.uuid]
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
      node_lvs = node_lvs.payload

      delta = all_lvs.difference(node_lvs.keys())
      if delta:
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
      if online_lvs:
        raise errors.OpPrereqError("Online logical volumes found, cannot"
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
                                   errors.ECODE_STATE)
      # update the size of disk based on what is found
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
                                        dsk[constants.IDISK_ADOPT])][0]))

    elif self.op.disk_template == constants.DT_BLOCK:
      # Normalize and de-duplicate device paths
      all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
                       for disk in self.disks])
      if len(all_disks) != len(self.disks):
        raise errors.OpPrereqError("Duplicate disk names given for adoption",
                                   errors.ECODE_INVAL)
      baddisks = [d for d in all_disks
                  if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
      if baddisks:
        raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
                                   " cannot be adopted" %
                                   (utils.CommaJoin(baddisks),
                                    constants.ADOPTABLE_BLOCKDEV_ROOT),
                                   errors.ECODE_INVAL)

      node_disks = self.rpc.call_bdev_sizes([pnode.uuid],
                                            list(all_disks))[pnode.uuid]
      node_disks.Raise("Cannot get block device information from node %s" %
                       pnode.name)
      node_disks = node_disks.payload
      delta = all_disks.difference(node_disks.keys())
      if delta:
        raise errors.OpPrereqError("Missing block device(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_disks[dsk[constants.IDISK_ADOPT]]))

    # Verify instance specs
    spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
    ispec = {
      constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
      constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
      constants.ISPEC_DISK_COUNT: len(self.disks),
      constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE]
                                  for disk in self.disks],
      constants.ISPEC_NIC_COUNT: len(self.nics),
      constants.ISPEC_SPINDLE_USE: spindle_use,
      }

    group_info = self.cfg.GetNodeGroup(pnode.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
    res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
                                               self.op.disk_template)
    if not self.op.ignore_ipolicy and res:
      msg = ("Instance allocation to group %s (%s) violates policy: %s" %
             (pnode.group, group_info.name, utils.CommaJoin(res)))
      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

    CheckHVParams(self, node_uuids, self.op.hypervisor, self.op.hvparams)

    CheckNodeHasOS(self, pnode.uuid, self.op.os_type, self.op.force_variant)
    # check OS parameters (remotely)
    CheckOSParams(self, True, node_uuids, self.op.os_type, self.os_full)

    CheckNicsBridgesExist(self, self.nics, self.pnode.uuid)

    #TODO: _CheckExtParams (remotely)
    # Check parameters for extstorage

    # memory check on primary node
    #TODO(dynmem): use MINMEM for checking
    if self.op.start:
      hvfull = objects.FillDict(cluster.hvparams.get(self.op.hypervisor, {}),
                                self.op.hvparams)
      CheckNodeFreeMemory(self, self.pnode.uuid,
                          "creating instance %s" % self.op.instance_name,
                          self.be_full[constants.BE_MAXMEM],
                          self.op.hypervisor, hvfull)

    self.dry_run_result = list(node_uuids)
1217

    
1218
  def Exec(self, feedback_fn):
1219
    """Create and add the instance to the cluster.
1220

1221
    """
    assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
                self.owned_locks(locking.LEVEL_NODE)), \
      "Node locks differ from node resource locks"
    assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)

    ht_kind = self.op.hypervisor
    if ht_kind in constants.HTS_REQ_PORT:
      network_port = self.cfg.AllocatePort()
    else:
      network_port = None

    instance_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())

    # This is ugly, but we have a chicken-and-egg problem here:
    # We can only take the group disk parameters, as the instance
    # has no disks yet (we are generating them right here).
    nodegroup = self.cfg.GetNodeGroup(self.pnode.group)
    disks = GenerateDiskTemplate(self,
                                 self.op.disk_template,
                                 instance_uuid, self.pnode.uuid,
                                 self.secondaries,
                                 self.disks,
                                 self.instance_file_storage_dir,
                                 self.op.file_driver,
                                 0,
                                 feedback_fn,
                                 self.cfg.GetGroupDiskParams(nodegroup))

    iobj = objects.Instance(name=self.op.instance_name,
                            uuid=instance_uuid,
                            os=self.op.os_type,
                            primary_node=self.pnode.uuid,
                            nics=self.nics, disks=disks,
                            disk_template=self.op.disk_template,
                            disks_active=False,
                            admin_state=constants.ADMINST_DOWN,
                            network_port=network_port,
                            beparams=self.op.beparams,
                            hvparams=self.op.hvparams,
                            hypervisor=self.op.hypervisor,
                            osparams=self.op.osparams,
                            )

    if self.op.tags:
      for tag in self.op.tags:
        iobj.AddTag(tag)

    if self.adopt_disks:
      if self.op.disk_template == constants.DT_PLAIN:
        # rename LVs to the newly-generated names; we need to construct
        # 'fake' LV disks with the old data, plus the new unique_id
        tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
        rename_to = []
        for t_dsk, a_dsk in zip(tmp_disks, self.disks):
          rename_to.append(t_dsk.logical_id)
          t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
          self.cfg.SetDiskID(t_dsk, self.pnode.uuid)
        result = self.rpc.call_blockdev_rename(self.pnode.uuid,
                                               zip(tmp_disks, rename_to))
        result.Raise("Failed to rename adopted LVs")
    else:
      feedback_fn("* creating instance disks...")
      try:
        CreateDisks(self, iobj)
      except errors.OpExecError:
        self.LogWarning("Device creation failed")
        self.cfg.ReleaseDRBDMinors(self.op.instance_name)
        raise

    feedback_fn("adding instance %s to cluster config" % self.op.instance_name)

    self.cfg.AddInstance(iobj, self.proc.GetECId())

    # Declare that we don't want to remove the instance lock anymore, as we've
    # added the instance to the config
    del self.remove_locks[locking.LEVEL_INSTANCE]

    if self.op.mode == constants.INSTANCE_IMPORT:
      # Release unused nodes
      ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node_uuid])
    else:
      # Release all nodes
      ReleaseLocks(self, locking.LEVEL_NODE)

    disk_abort = False
    if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
      feedback_fn("* wiping instance disks...")
      try:
        WipeDisks(self, iobj)
      except errors.OpExecError, err:
        logging.exception("Wiping disks failed")
        self.LogWarning("Wiping instance disks failed (%s)", err)
        disk_abort = True

    if disk_abort:
      # Something is already wrong with the disks, don't do anything else
      pass
    elif self.op.wait_for_sync:
      disk_abort = not WaitForSync(self, iobj)
    elif iobj.disk_template in constants.DTS_INT_MIRROR:
      # make sure the disks are not degraded (still sync-ing is ok)
      feedback_fn("* checking mirrors status")
      disk_abort = not WaitForSync(self, iobj, oneshot=True)
    else:
      disk_abort = False

    if disk_abort:
      RemoveDisks(self, iobj)
      self.cfg.RemoveInstance(iobj.uuid)
      # Make sure the instance lock gets removed
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance")

    # instance disks are now active
    iobj.disks_active = True

    # Release all node resource locks
    ReleaseLocks(self, locking.LEVEL_NODE_RES)

    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
      # we need to set the disks ID to the primary node, since the
      # preceding code might or might not have done it, depending on
      # disk template and other options
      for disk in iobj.disks:
        self.cfg.SetDiskID(disk, self.pnode.uuid)
      if self.op.mode == constants.INSTANCE_CREATE:
        if not self.op.no_install:
          pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
                        not self.op.wait_for_sync)
          if pause_sync:
            feedback_fn("* pausing disk sync to install instance OS")
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
                                                              (iobj.disks,
                                                               iobj), True)
            for idx, success in enumerate(result.payload):
              if not success:
                logging.warn("pause-sync of instance %s for disk %d failed",
                             self.op.instance_name, idx)

          feedback_fn("* running the instance OS create scripts...")
          # FIXME: pass debug option from opcode to backend
          os_add_result = \
            self.rpc.call_instance_os_add(self.pnode.uuid, (iobj, None), False,
                                          self.op.debug_level)
          if pause_sync:
            feedback_fn("* resuming disk sync")
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
                                                              (iobj.disks,
                                                               iobj), False)
            for idx, success in enumerate(result.payload):
              if not success:
                logging.warn("resume-sync of instance %s for disk %d failed",
                             self.op.instance_name, idx)

          os_add_result.Raise("Could not add os for instance %s"
                              " on node %s" % (self.op.instance_name,
                                               self.pnode.name))

      else:
        if self.op.mode == constants.INSTANCE_IMPORT:
          feedback_fn("* running the instance OS import scripts...")

          transfers = []

          for idx, image in enumerate(self.src_images):
            if not image:
              continue

            # FIXME: pass debug option from opcode to backend
            dt = masterd.instance.DiskTransfer("disk/%s" % idx,
                                               constants.IEIO_FILE, (image, ),
                                               constants.IEIO_SCRIPT,
                                               (iobj.disks[idx], idx),
                                               None)
            transfers.append(dt)

          import_result = \
            masterd.instance.TransferInstanceData(self, feedback_fn,
                                                  self.op.src_node_uuid,
                                                  self.pnode.uuid,
                                                  self.pnode.secondary_ip,
                                                  iobj, transfers)
          if not compat.all(import_result):
            self.LogWarning("Some disks for instance %s on node %s were not"
                            " imported successfully" % (self.op.instance_name,
                                                        self.pnode.name))

          rename_from = self._old_instance_name

        elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
          feedback_fn("* preparing remote import...")
          # The source cluster will stop the instance before attempting to make
          # a connection. In some cases stopping an instance can take a long
          # time, hence the shutdown timeout is added to the connection
          # timeout.
          connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
                             self.op.source_shutdown_timeout)
          timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)

          assert iobj.primary_node == self.pnode.uuid
          disk_results = \
            masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
                                          self.source_x509_ca,
                                          self._cds, timeouts)
          if not compat.all(disk_results):
            # TODO: Should the instance still be started, even if some disks
            # failed to import (valid for local imports, too)?
            self.LogWarning("Some disks for instance %s on node %s were not"
                            " imported successfully" % (self.op.instance_name,
                                                        self.pnode.name))

          rename_from = self.source_instance_name

        else:
          # also checked in the prereq part
          raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
                                       % self.op.mode)

        # Run rename script on newly imported instance
        assert iobj.name == self.op.instance_name
        feedback_fn("Running rename script for %s" % self.op.instance_name)
        result = self.rpc.call_instance_run_rename(self.pnode.uuid, iobj,
                                                   rename_from,
                                                   self.op.debug_level)
        result.Warn("Failed to run rename script for %s on node %s" %
                    (self.op.instance_name, self.pnode.name), self.LogWarning)

    assert not self.owned_locks(locking.LEVEL_NODE_RES)

    if self.op.start:
      iobj.admin_state = constants.ADMINST_UP
      self.cfg.Update(iobj, feedback_fn)
      logging.info("Starting instance %s on node %s", self.op.instance_name,
                   self.pnode.name)
      feedback_fn("* starting instance...")
      result = self.rpc.call_instance_start(self.pnode.uuid, (iobj, None, None),
                                            False, self.op.reason)
      result.Raise("Could not start instance")

    return list(iobj.all_nodes)


class LUInstanceRename(LogicalUnit):
  """Rename an instance.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE

  def CheckArguments(self):
    """Check arguments.

    """
    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("IP address check requires a name check",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = BuildInstanceHookEnvByObject(self, self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    (self.op.instance_uuid, self.op.instance_name) = \
      ExpandInstanceUuidAndName(self.cfg, self.op.instance_uuid,
                                self.op.instance_name)
    instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    assert instance is not None

    # It should actually not happen that an instance is running with a disabled
    # disk template, but in case it does, the renaming of file-based instances
    # will fail horribly. Thus, we test it before.
    if (instance.disk_template in constants.DTS_FILEBASED and
        self.op.new_name != instance.name):
      CheckDiskTemplateEnabled(self.cfg.GetClusterInfo(),
                               instance.disk_template)

    CheckNodeOnline(self, instance.primary_node)
    CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
                       msg="cannot rename")
    self.instance = instance

    new_name = self.op.new_name
    if self.op.name_check:
      hostname = _CheckHostnameSane(self, new_name)
      new_name = self.op.new_name = hostname.name
      if (self.op.ip_check and
          netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (hostname.ip, new_name),
                                   errors.ECODE_NOTUNIQUE)

    instance_names = [inst.name for
                      inst in self.cfg.GetAllInstancesInfo().values()]
    if new_name in instance_names and new_name != instance.name:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 new_name, errors.ECODE_EXISTS)

  def Exec(self, feedback_fn):
    """Rename the instance.

    """
    old_name = self.instance.name

    rename_file_storage = False
    if (self.instance.disk_template in constants.DTS_FILEBASED and
        self.op.new_name != self.instance.name):
      old_file_storage_dir = os.path.dirname(
                               self.instance.disks[0].logical_id[1])
      rename_file_storage = True

    self.cfg.RenameInstance(self.instance.uuid, self.op.new_name)
    # Change the instance lock. This is definitely safe while we hold the BGL.
    # Otherwise the new lock would have to be added in acquired mode.
    assert self.REQ_BGL
    assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER)
    self.glm.remove(locking.LEVEL_INSTANCE, old_name)
    self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)

    # re-read the instance from the configuration after rename
    renamed_inst = self.cfg.GetInstanceInfo(self.instance.uuid)

    if rename_file_storage:
      new_file_storage_dir = os.path.dirname(
                               renamed_inst.disks[0].logical_id[1])
      result = self.rpc.call_file_storage_dir_rename(renamed_inst.primary_node,
                                                     old_file_storage_dir,
                                                     new_file_storage_dir)
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
                   " (but the instance has been renamed in Ganeti)" %
                   (self.cfg.GetNodeName(renamed_inst.primary_node),
                    old_file_storage_dir, new_file_storage_dir))

    StartInstanceDisks(self, renamed_inst, None)
    # update info on disks
    info = GetInstanceInfoText(renamed_inst)
    for (idx, disk) in enumerate(renamed_inst.disks):
      for node_uuid in renamed_inst.all_nodes:
        self.cfg.SetDiskID(disk, node_uuid)
        result = self.rpc.call_blockdev_setinfo(node_uuid, disk, info)
        result.Warn("Error setting info on node %s for disk %s" %
                    (self.cfg.GetNodeName(node_uuid), idx), self.LogWarning)
    try:
      result = self.rpc.call_instance_run_rename(renamed_inst.primary_node,
                                                 renamed_inst, old_name,
                                                 self.op.debug_level)
      result.Warn("Could not run OS rename script for instance %s on node %s"
                  " (but the instance has been renamed in Ganeti)" %
                  (renamed_inst.name,
                   self.cfg.GetNodeName(renamed_inst.primary_node)),
                  self.LogWarning)
    finally:
      ShutdownInstanceDisks(self, renamed_inst)

    return renamed_inst.name


class LUInstanceRemove(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()
    elif level == locking.LEVEL_NODE_RES:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = BuildInstanceHookEnvByObject(self, self.instance)
    env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()]
    nl_post = list(self.instance.all_nodes) + nl
    return (nl, nl_post)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Remove the instance.

    """
    logging.info("Shutting down instance %s on node %s", self.instance.name,
                 self.cfg.GetNodeName(self.instance.primary_node))

    result = self.rpc.call_instance_shutdown(self.instance.primary_node,
                                             self.instance,
                                             self.op.shutdown_timeout,
                                             self.op.reason)
    if self.op.ignore_failures:
      result.Warn("Warning: can't shutdown instance", feedback_fn)
    else:
      result.Raise("Could not shutdown instance %s on node %s" %
                   (self.instance.name,
                    self.cfg.GetNodeName(self.instance.primary_node)))

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES))
    assert not (set(self.instance.all_nodes) -
                self.owned_locks(locking.LEVEL_NODE)), \
      "Not owning correct locks"

    RemoveInstance(self, feedback_fn, self.instance, self.op.ignore_failures)


class LUInstanceMove(LogicalUnit):
  """Move an instance by data-copying.

  """
  HPATH = "instance-move"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    (self.op.target_node_uuid, self.op.target_node) = \
      ExpandNodeUuidAndName(self.cfg, self.op.target_node_uuid,
                            self.op.target_node)
    self.needed_locks[locking.LEVEL_NODE] = [self.op.target_node]
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes(primary_only=True)
    elif level == locking.LEVEL_NODE_RES:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "TARGET_NODE": self.op.target_node,
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
      }
    env.update(BuildInstanceHookEnvByObject(self, self.instance))
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [
      self.cfg.GetMasterNode(),
      self.instance.primary_node,
      self.op.target_node_uuid,
      ]
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    if self.instance.disk_template not in constants.DTS_COPYABLE:
      raise errors.OpPrereqError("Disk template %s not suitable for copying" %
                                 self.instance.disk_template,
                                 errors.ECODE_STATE)

    target_node = self.cfg.GetNodeInfo(self.op.target_node_uuid)
    assert target_node is not None, \
      "Cannot retrieve locked node %s" % self.op.target_node

    self.target_node_uuid = target_node.uuid
    if target_node.uuid == self.instance.primary_node:
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
                                 (self.instance.name, target_node.name),
                                 errors.ECODE_STATE)

    bep = self.cfg.GetClusterInfo().FillBE(self.instance)

    for idx, dsk in enumerate(self.instance.disks):
      if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
                                   " cannot copy" % idx, errors.ECODE_STATE)

    CheckNodeOnline(self, target_node.uuid)
    CheckNodeNotDrained(self, target_node.uuid)
    CheckNodeVmCapable(self, target_node.uuid)
    cluster = self.cfg.GetClusterInfo()
    group_info = self.cfg.GetNodeGroup(target_node.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
    CheckTargetNodeIPolicy(self, ipolicy, self.instance, target_node, self.cfg,
                           ignore=self.op.ignore_ipolicy)

    if self.instance.admin_state == constants.ADMINST_UP:
      # check memory requirements on the secondary node
      CheckNodeFreeMemory(
          self, target_node.uuid, "failing over instance %s" %
          self.instance.name, bep[constants.BE_MAXMEM],
          self.instance.hypervisor,
          self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])
    else:
      self.LogInfo("Not checking memory on the secondary node as"
                   " instance will not be started")

    # check bridge existence
    CheckInstanceBridgesExist(self, self.instance, node_uuid=target_node.uuid)

  def Exec(self, feedback_fn):
    """Move an instance.

    The move is done by shutting it down on its present node, copying
    the data over (slow) and starting it on the new node.

    """
    source_node = self.cfg.GetNodeInfo(self.instance.primary_node)
    target_node = self.cfg.GetNodeInfo(self.target_node_uuid)

    self.LogInfo("Shutting down instance %s on source node %s",
                 self.instance.name, source_node.name)

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES))

    result = self.rpc.call_instance_shutdown(source_node.uuid, self.instance,
                                             self.op.shutdown_timeout,
                                             self.op.reason)
    if self.op.ignore_consistency:
      result.Warn("Could not shutdown instance %s on node %s. Proceeding"
                  " anyway. Please make sure node %s is down. Error details" %
                  (self.instance.name, source_node.name, source_node.name),
                  self.LogWarning)
    else:
      result.Raise("Could not shutdown instance %s on node %s" %
                   (self.instance.name, source_node.name))

    # create the target disks
    try:
      CreateDisks(self, self.instance, target_node_uuid=target_node.uuid)
    except errors.OpExecError:
      self.LogWarning("Device creation failed")
      self.cfg.ReleaseDRBDMinors(self.instance.uuid)
      raise

    cluster_name = self.cfg.GetClusterInfo().cluster_name

    errs = []
    # activate, get path, copy the data over
    for idx, disk in enumerate(self.instance.disks):
      self.LogInfo("Copying data for disk %d", idx)
      result = self.rpc.call_blockdev_assemble(
                 target_node.uuid, (disk, self.instance), self.instance.name,
                 True, idx)
      if result.fail_msg:
        self.LogWarning("Can't assemble newly created disk %d: %s",
                        idx, result.fail_msg)
        errs.append(result.fail_msg)
        break
      dev_path = result.payload
      result = self.rpc.call_blockdev_export(source_node.uuid, (disk,
                                                                self.instance),
                                             target_node.name, dev_path,
                                             cluster_name)
      if result.fail_msg:
        self.LogWarning("Can't copy data over for disk %d: %s",
                        idx, result.fail_msg)
        errs.append(result.fail_msg)
        break

    if errs:
      self.LogWarning("Some disks failed to copy, aborting")
      try:
        RemoveDisks(self, self.instance, target_node_uuid=target_node.uuid)
      finally:
        self.cfg.ReleaseDRBDMinors(self.instance.uuid)
        raise errors.OpExecError("Errors during disk copy: %s" %
                                 (",".join(errs),))

    self.instance.primary_node = target_node.uuid
    self.cfg.Update(self.instance, feedback_fn)

    self.LogInfo("Removing the disks on the original node")
    RemoveDisks(self, self.instance, target_node_uuid=source_node.uuid)

    # Only start the instance if it's marked as up
    if self.instance.admin_state == constants.ADMINST_UP:
      self.LogInfo("Starting instance %s on node %s",
                   self.instance.name, target_node.name)

      disks_ok, _ = AssembleInstanceDisks(self, self.instance,
                                          ignore_secondaries=True)
      if not disks_ok:
        ShutdownInstanceDisks(self, self.instance)
        raise errors.OpExecError("Can't activate the instance's disks")

      result = self.rpc.call_instance_start(target_node.uuid,
                                            (self.instance, None, None), False,
                                            self.op.reason)
      msg = result.fail_msg
      if msg:
        ShutdownInstanceDisks(self, self.instance)
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
                                 (self.instance.name, target_node.name, msg))


class LUInstanceMultiAlloc(NoHooksLU):
  """Allocates multiple instances at the same time.

  """
  REQ_BGL = False

  def CheckArguments(self):
    """Check arguments.

    """
    nodes = []
    for inst in self.op.instances:
      if inst.iallocator is not None:
        raise errors.OpPrereqError("iallocator is not allowed to be set on"
                                   " instance objects", errors.ECODE_INVAL)
      nodes.append(bool(inst.pnode))
      if inst.disk_template in constants.DTS_INT_MIRROR:
        nodes.append(bool(inst.snode))

    has_nodes = compat.any(nodes)
    if compat.all(nodes) ^ has_nodes:
      raise errors.OpPrereqError("There are instance objects providing"
                                 " pnode/snode while others do not",
                                 errors.ECODE_INVAL)

    if self.op.iallocator is None:
      default_iallocator = self.cfg.GetDefaultIAllocator()
      if default_iallocator and has_nodes:
        self.op.iallocator = default_iallocator
      else:
        raise errors.OpPrereqError("No iallocator or nodes on the instances"
                                   " given and no cluster-wide default"
                                   " iallocator found; please specify either"
                                   " an iallocator or nodes on the instances"
                                   " or set a cluster-wide default iallocator",
                                   errors.ECODE_INVAL)

    _CheckOpportunisticLocking(self.op)

    dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
    if dups:
      raise errors.OpPrereqError("There are duplicate instance names: %s" %
                                 utils.CommaJoin(dups), errors.ECODE_INVAL)

  def ExpandNames(self):
    """Calculate the locks.

    """
    self.share_locks = ShareAll()
    self.needed_locks = {
      # iallocator will select nodes and even if no iallocator is used,
      # collisions with LUInstanceCreate should be avoided
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
      }

    if self.op.iallocator:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
      self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET

      if self.op.opportunistic_locking:
        self.opportunistic_locks[locking.LEVEL_NODE] = True
        self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
    else:
      nodeslist = []
      for inst in self.op.instances:
        (inst.pnode_uuid, inst.pnode) = \
          ExpandNodeUuidAndName(self.cfg, inst.pnode_uuid, inst.pnode)
        nodeslist.append(inst.pnode)
        if inst.snode is not None:
          (inst.snode_uuid, inst.snode) = \
            ExpandNodeUuidAndName(self.cfg, inst.snode_uuid, inst.snode)
          nodeslist.append(inst.snode)

      self.needed_locks[locking.LEVEL_NODE] = nodeslist
      # Lock resources of instance's primary and secondary nodes (copy to
      # prevent accidental modification)
      self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)

  def CheckPrereq(self):
    """Check prerequisite.

    """
    cluster = self.cfg.GetClusterInfo()
    default_vg = self.cfg.GetVGName()
    ec_id = self.proc.GetECId()

    if self.op.opportunistic_locking:
      # Only consider nodes for which a lock is held
      node_whitelist = self.cfg.GetNodeNames(
                         list(self.owned_locks(locking.LEVEL_NODE)))
    else:
      node_whitelist = None

    insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
                                         _ComputeNics(op, cluster, None,
                                                      self.cfg, ec_id),
                                         _ComputeFullBeParams(op, cluster),
                                         node_whitelist)
             for op in self.op.instances]

    req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" %
                                 (self.op.iallocator, ial.info),
                                 errors.ECODE_NORES)

    self.ia_result = ial.result

    if self.op.dry_run:
      self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
        constants.JOB_IDS_KEY: [],
        })

  def _ConstructPartialResult(self):
    """Constructs the partial result.

    """
    (allocatable, failed) = self.ia_result
    return {
      opcodes.OpInstanceMultiAlloc.ALLOCATABLE_KEY:
        map(compat.fst, allocatable),
      opcodes.OpInstanceMultiAlloc.FAILED_KEY: failed,
      }

  def Exec(self, feedback_fn):
    """Executes the opcode.

    """
    op2inst = dict((op.instance_name, op) for op in self.op.instances)
    (allocatable, failed) = self.ia_result

    jobs = []
    for (name, node_names) in allocatable:
      op = op2inst.pop(name)

      (op.pnode_uuid, op.pnode) = \
        ExpandNodeUuidAndName(self.cfg, None, node_names[0])
      if len(node_names) > 1:
        (op.snode_uuid, op.snode) = \
          ExpandNodeUuidAndName(self.cfg, None, node_names[1])

      jobs.append([op])

    missing = set(op2inst.keys()) - set(failed)
    assert not missing, \
      "Iallocator did return incomplete result: %s" % utils.CommaJoin(missing)

    return ResultWithJobs(jobs, **self._ConstructPartialResult())


class _InstNicModPrivate:
  """Data structure for network interface modifications.

  Used by L{LUInstanceSetParams}.

  """
  def __init__(self):
    self.params = None
    self.filled = None


def _PrepareContainerMods(mods, private_fn):
  """Prepares a list of container modifications by adding a private data field.

  @type mods: list of tuples; (operation, index, parameters)
  @param mods: List of modifications
  @type private_fn: callable or None
  @param private_fn: Callable for constructing a private data field for a
    modification
  @rtype: list

  """
  if private_fn is None:
    fn = lambda: None
  else:
    fn = private_fn

  return [(op, idx, params, fn()) for (op, idx, params) in mods]


def _CheckNodesPhysicalCPUs(lu, node_uuids, requested, hypervisor_specs):
  """Checks if nodes have enough physical CPUs.

  This function checks if all given nodes have the needed number of
  physical CPUs. In case any node has fewer CPUs or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type node_uuids: C{list}
  @param node_uuids: the list of node UUIDs to check
  @type requested: C{int}
  @param requested: the minimum acceptable number of physical CPUs
  @type hypervisor_specs: list of pairs (string, dict of strings)
  @param hypervisor_specs: list of hypervisor specifications in
      pairs (hypervisor_name, hvparams)
  @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
      or we cannot check the node

  """
  nodeinfo = lu.rpc.call_node_info(node_uuids, None, hypervisor_specs)
  for node_uuid in node_uuids:
    info = nodeinfo[node_uuid]
    node_name = lu.cfg.GetNodeName(node_uuid)
    info.Raise("Cannot get current information from node %s" % node_name,
               prereq=True, ecode=errors.ECODE_ENVIRON)
    (_, _, (hv_info, )) = info.payload
    num_cpus = hv_info.get("cpu_total", None)
    if not isinstance(num_cpus, int):
      raise errors.OpPrereqError("Can't compute the number of physical CPUs"
                                 " on node %s, result was '%s'" %
                                 (node_name, num_cpus), errors.ECODE_ENVIRON)
    if requested > num_cpus:
      raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
                                 "required" % (node_name, num_cpus, requested),
                                 errors.ECODE_NORES)


def GetItemFromContainer(identifier, kind, container):
  """Return the item referred to by the identifier.

  @type identifier: string
  @param identifier: Item index or name or UUID
  @type kind: string
  @param kind: One-word item description
  @type container: list
  @param container: Container to get the item from

  """
  # Index
  try:
    idx = int(identifier)
    if idx == -1:
      # Append
      absidx = len(container) - 1
    elif idx < 0:
      raise IndexError("Not accepting negative indices other than -1")
    elif idx > len(container):
      raise IndexError("Got %s index %s, but there are only %s" %
                       (kind, idx, len(container)))
    else:
      absidx = idx
    return (absidx, container[idx])
  except ValueError:
    pass

  for idx, item in enumerate(container):
    if item.uuid == identifier or item.name == identifier:
      return (idx, item)

  raise errors.OpPrereqError("Cannot find %s with identifier %s" %
                             (kind, identifier), errors.ECODE_NOENT)


def _ApplyContainerMods(kind, container, chgdesc, mods,
                        create_fn, modify_fn, remove_fn):
  """Applies descriptions in C{mods} to C{container}.

  @type kind: string
  @param kind: One-word item description
  @type container: list
  @param container: Container to modify
  @type chgdesc: None or list
  @param chgdesc: List of applied changes
  @type mods: list
  @param mods: Modifications as returned by L{_PrepareContainerMods}
  @type create_fn: callable
  @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
    receives absolute item index, parameters and private data object as added
    by L{_PrepareContainerMods}, returns tuple containing new item and changes
    as list
  @type modify_fn: callable
  @param modify_fn: Callback for modifying an existing item
    (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
    and private data object as added by L{_PrepareContainerMods}, returns
    changes as list
  @type remove_fn: callable
  @param remove_fn: Callback on removing item; receives absolute item index,
    item and private data object as added by L{_PrepareContainerMods}

  """
  """
2163
  for (op, identifier, params, private) in mods:
2164
    changes = None
2165

    
2166
    if op == constants.DDM_ADD:
2167
      # Calculate where item will be added
2168
      # When adding an item, identifier can only be an index
2169
      try:
2170
        idx = int(identifier)
2171
      except ValueError:
2172
        raise errors.OpPrereqError("Only possitive integer or -1 is accepted as"
2173
                                   " identifier for %s" % constants.DDM_ADD,
2174
                                   errors.ECODE_INVAL)
2175
      if idx == -1:
2176
        addidx = len(container)
2177
      else:
2178
        if idx < 0:
2179
          raise IndexError("Not accepting negative indices other than -1")
2180
        elif idx > len(container):
2181
          raise IndexError("Got %s index %s, but there are only %s" %
2182
                           (kind, idx, len(container)))
2183
        addidx = idx
2184

    
2185
      if create_fn is None:
2186
        item = params
2187
      else:
2188
        (item, changes) = create_fn(addidx, params, private)
2189

    
2190
      if idx == -1:
2191
        container.append(item)
2192
      else:
2193
        assert idx >= 0
2194
        assert idx <= len(container)
2195
        # list.insert does so before the specified index
2196
        container.insert(idx, item)
2197
    else:
2198
      # Retrieve existing item
2199
      (absidx, item) = GetItemFromContainer(identifier, kind, container)
2200

    
2201
      if op == constants.DDM_REMOVE:
2202
        assert not params
2203

    
2204
        if remove_fn is not None:
2205
          remove_fn(absidx, item, private)
2206

    
2207
        changes = [("%s/%s" % (kind, absidx), "remove")]
2208

    
2209
        assert container[absidx] == item
2210
        del container[absidx]
2211
      elif op == constants.DDM_MODIFY:
2212
        if modify_fn is not None:
2213
          changes = modify_fn(absidx, item, params, private)
2214
      else:
2215
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2216

    
2217
    assert _TApplyContModsCbChanges(changes)
2218

    
2219
    if not (chgdesc is None or changes is None):
2220
      chgdesc.extend(changes)


def _UpdateIvNames(base_index, disks):
  """Updates the C{iv_name} attribute of disks.

  @type disks: list of L{objects.Disk}

  """
  for (idx, disk) in enumerate(disks):
    disk.iv_name = "disk/%s" % (base_index + idx, )


class LUInstanceSetParams(LogicalUnit):
  """Modifies an instance's parameters.

  """
  HPATH = "instance-modify"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  @staticmethod
  def _UpgradeDiskNicMods(kind, mods, verify_fn):
    assert ht.TList(mods)
    assert not mods or len(mods[0]) in (2, 3)
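
    # Older-style modifications come in as 2-tuples (op, params); they are
    # upgraded below to the 3-tuple form (op, identifier, params): add/remove
    # operations get the index -1, while anything else is treated as a
    # modification of the item named by "op".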
    if mods and len(mods[0]) == 2:
      result = []

      addremove = 0
      for op, params in mods:
        if op in (constants.DDM_ADD, constants.DDM_REMOVE):
          result.append((op, -1, params))
          addremove += 1

          if addremove > 1:
            raise errors.OpPrereqError("Only one %s add or remove operation is"
                                       " supported at a time" % kind,
                                       errors.ECODE_INVAL)
        else:
          result.append((constants.DDM_MODIFY, op, params))

      assert verify_fn(result)
    else:
      result = mods

    return result

  @staticmethod
  def _CheckMods(kind, mods, key_types, item_fn):
    """Ensures requested disk/NIC modifications are valid.

    """
    for (op, _, params) in mods:
      assert ht.TDict(params)

      # If 'key_types' is an empty dict, we assume we have an
      # 'ext' template and thus do not ForceDictType
      if key_types:
        utils.ForceDictType(params, key_types)

      if op == constants.DDM_REMOVE:
        if params:
          raise errors.OpPrereqError("No settings should be passed when"
                                     " removing a %s" % kind,
                                     errors.ECODE_INVAL)
      elif op in (constants.DDM_ADD, constants.DDM_MODIFY):
        item_fn(op, params)
      else:
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)

  @staticmethod
  def _VerifyDiskModification(op, params, excl_stor):
    """Verifies a disk modification.

    """
    if op == constants.DDM_ADD:
      mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
      if mode not in constants.DISK_ACCESS_SET:
        raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
                                   errors.ECODE_INVAL)

      size = params.get(constants.IDISK_SIZE, None)
      if size is None:
        raise errors.OpPrereqError("Required disk parameter '%s' missing" %
                                   constants.IDISK_SIZE, errors.ECODE_INVAL)

      try:
        size = int(size)
      except (TypeError, ValueError), err:
        raise errors.OpPrereqError("Invalid disk size parameter: %s" % err,
                                   errors.ECODE_INVAL)

      params[constants.IDISK_SIZE] = size
      name = params.get(constants.IDISK_NAME, None)
      if name is not None and name.lower() == constants.VALUE_NONE:
        params[constants.IDISK_NAME] = None

      CheckSpindlesExclusiveStorage(params, excl_stor, True)

    elif op == constants.DDM_MODIFY:
      if constants.IDISK_SIZE in params:
        raise errors.OpPrereqError("Disk size change not possible, use"
                                   " grow-disk", errors.ECODE_INVAL)
      if len(params) > 2:
        raise errors.OpPrereqError("Disk modification doesn't support"
                                   " additional arbitrary parameters",
                                   errors.ECODE_INVAL)
      name = params.get(constants.IDISK_NAME, None)
      if name is not None and name.lower() == constants.VALUE_NONE:
        params[constants.IDISK_NAME] = None

  @staticmethod
  def _VerifyNicModification(op, params):
    """Verifies a network interface modification.

    """
    if op in (constants.DDM_ADD, constants.DDM_MODIFY):
      ip = params.get(constants.INIC_IP, None)
      name = params.get(constants.INIC_NAME, None)
      req_net = params.get(constants.INIC_NETWORK, None)
      link = params.get(constants.NIC_LINK, None)
      mode = params.get(constants.NIC_MODE, None)
      if name is not None and name.lower() == constants.VALUE_NONE:
        params[constants.INIC_NAME] = None
      if req_net is not None:
        if req_net.lower() == constants.VALUE_NONE:
          params[constants.INIC_NETWORK] = None
          req_net = None
        elif link is not None or mode is not None:
          raise errors.OpPrereqError("If network is given"
                                     " mode or link should not",
                                     errors.ECODE_INVAL)

      if op == constants.DDM_ADD:
        macaddr = params.get(constants.INIC_MAC, None)
        if macaddr is None:
          params[constants.INIC_MAC] = constants.VALUE_AUTO

      if ip is not None:
        if ip.lower() == constants.VALUE_NONE:
          params[constants.INIC_IP] = None
        else:
          if ip.lower() == constants.NIC_IP_POOL:
            if op == constants.DDM_ADD and req_net is None:
              raise errors.OpPrereqError("If ip=pool, parameter network"
                                         " cannot be none",
                                         errors.ECODE_INVAL)
          else:
            if not netutils.IPAddress.IsValid(ip):
              raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                         errors.ECODE_INVAL)

      if constants.INIC_MAC in params:
        macaddr = params[constants.INIC_MAC]
        if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
          macaddr = utils.NormalizeAndValidateMac(macaddr)

        if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
                                     " modifying an existing NIC",
                                     errors.ECODE_INVAL)

  def CheckArguments(self):
    if not (self.op.nics or self.op.disks or self.op.disk_template or
            self.op.hvparams or self.op.beparams or self.op.os_name or
            self.op.offline is not None or self.op.runtime_mem or
            self.op.pnode):
      raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)

    if self.op.hvparams:
      CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
                           "hypervisor", "instance", "cluster")

    self.op.disks = self._UpgradeDiskNicMods(
      "disk", self.op.disks, opcodes.OpInstanceSetParams.TestDiskModifications)
    self.op.nics = self._UpgradeDiskNicMods(
      "NIC", self.op.nics, opcodes.OpInstanceSetParams.TestNicModifications)

    if self.op.disks and self.op.disk_template is not None:
      raise errors.OpPrereqError("Disk template conversion and other disk"
                                 " changes not supported at the same time",
                                 errors.ECODE_INVAL)

    if (self.op.disk_template and
        self.op.disk_template in constants.DTS_INT_MIRROR and
        self.op.remote_node is None):
      raise errors.OpPrereqError("Changing the disk template to a mirrored"
                                 " one requires specifying a secondary node",
                                 errors.ECODE_INVAL)

    # Check NIC modifications
    self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
                    self._VerifyNicModification)

    if self.op.pnode:
      (self.op.pnode_uuid, self.op.pnode) = \
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODEGROUP] = []
    # Can't even acquire node locks in shared mode as upcoming changes in
    # Ganeti 2.6 will start to modify the node object on disk conversion
    self.needed_locks[locking.LEVEL_NODE] = []
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
    # Lock the node group to look up the ipolicy
    self.share_locks[locking.LEVEL_NODEGROUP] = 1

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
      # Acquire locks for the instance's nodegroups optimistically. Needs
      # to be verified in CheckPrereq
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
        self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
    elif level == locking.LEVEL_NODE:
      self._LockInstancesNodes()
      if self.op.disk_template and self.op.remote_node:
        (self.op.remote_node_uuid, self.op.remote_node) = \
          ExpandNodeUuidAndName(self.cfg, self.op.remote_node_uuid,
                                self.op.remote_node)
        self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node_uuid)
    elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, primary and secondaries.

    """
    args = {}
    if constants.BE_MINMEM in self.be_new:
      args["minmem"] = self.be_new[constants.BE_MINMEM]
    if constants.BE_MAXMEM in self.be_new:
      args["maxmem"] = self.be_new[constants.BE_MAXMEM]
    if constants.BE_VCPUS in self.be_new:
      args["vcpus"] = self.be_new[constants.BE_VCPUS]
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
    # information at all.

    if self._new_nics is not None:
      nics = []

      for nic in self._new_nics:
        n = copy.deepcopy(nic)
        nicparams = self.cluster.SimpleFillNIC(n.nicparams)
        n.nicparams = nicparams
        nics.append(NICToTuple(self, n))

      args["nics"] = nics

    env = BuildInstanceHookEnvByObject(self, self.instance, override=args)
    if self.op.disk_template:
      env["NEW_DISK_TEMPLATE"] = self.op.disk_template
    if self.op.runtime_mem:
      env["RUNTIME_MEMORY"] = self.op.runtime_mem

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)

  def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
                              old_params, cluster, pnode_uuid):

    update_params_dict = dict([(key, params[key])
                               for key in constants.NICS_PARAMETERS
                               if key in params])

    req_link = update_params_dict.get(constants.NIC_LINK, None)
    req_mode = update_params_dict.get(constants.NIC_MODE, None)

    new_net_uuid = None
    new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid)
    if new_net_uuid_or_name:
      new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name)
      new_net_obj = self.cfg.GetNetwork(new_net_uuid)

    if old_net_uuid:
      old_net_obj = self.cfg.GetNetwork(old_net_uuid)

    if new_net_uuid:
      netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode_uuid)
      if not netparams:
        raise errors.OpPrereqError("No netparams found for the network"
                                   " %s, probably not connected" %
                                   new_net_obj.name, errors.ECODE_INVAL)
      new_params = dict(netparams)
    else:
      new_params = GetUpdatedParams(old_params, update_params_dict)

    utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)

    new_filled_params = cluster.SimpleFillNIC(new_params)
    objects.NIC.CheckParameterSyntax(new_filled_params)

    new_mode = new_filled_params[constants.NIC_MODE]
    if new_mode == constants.NIC_MODE_BRIDGED:
      bridge = new_filled_params[constants.NIC_LINK]
      msg = self.rpc.call_bridges_exist(pnode_uuid, [bridge]).fail_msg
2529
      if msg:
2530
        msg = "Error checking bridges on node '%s': %s" % \
2531
                (self.cfg.GetNodeName(pnode_uuid), msg)
2532
        if self.op.force:
2533
          self.warn.append(msg)
2534
        else:
2535
          raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
2536

    
2537
    elif new_mode == constants.NIC_MODE_ROUTED:
2538
      ip = params.get(constants.INIC_IP, old_ip)
2539
      if ip is None:
2540
        raise errors.OpPrereqError("Cannot set the NIC IP address to None"
2541
                                   " on a routed NIC", errors.ECODE_INVAL)
2542

    
2543
    elif new_mode == constants.NIC_MODE_OVS:
2544
      # TODO: check OVS link
2545
      self.LogInfo("OVS links are currently not checked for correctness")
2546

    
2547
    if constants.INIC_MAC in params:
2548
      mac = params[constants.INIC_MAC]
2549
      if mac is None:
2550
        raise errors.OpPrereqError("Cannot unset the NIC MAC address",
2551
                                   errors.ECODE_INVAL)
2552
      elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2553
        # otherwise generate the MAC address
2554
        params[constants.INIC_MAC] = \
2555
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
2556
      else:
2557
        # or validate/reserve the current one
2558
        try:
2559
          self.cfg.ReserveMAC(mac, self.proc.GetECId())
2560
        except errors.ReservationError:
2561
          raise errors.OpPrereqError("MAC address '%s' already in use"
2562
                                     " in cluster" % mac,
2563
                                     errors.ECODE_NOTUNIQUE)
2564
    elif new_net_uuid != old_net_uuid:
2565

    
2566
      def get_net_prefix(net_uuid):
2567
        mac_prefix = None
2568
        if net_uuid:
2569
          nobj = self.cfg.GetNetwork(net_uuid)
2570
          mac_prefix = nobj.mac_prefix
2571

    
2572
        return mac_prefix
2573

    
2574
      new_prefix = get_net_prefix(new_net_uuid)
2575
      old_prefix = get_net_prefix(old_net_uuid)
2576
      if old_prefix != new_prefix:
2577
        params[constants.INIC_MAC] = \
2578
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
2579

    
2580
    # if there is a change in (ip, network) tuple
2581
    new_ip = params.get(constants.INIC_IP, old_ip)
2582
    if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
2583
      if new_ip:
2584
        # if IP is pool then require a network and generate one IP
2585
        if new_ip.lower() == constants.NIC_IP_POOL:
2586
          if new_net_uuid:
2587
            try:
2588
              new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
2589
            except errors.ReservationError:
2590
              raise errors.OpPrereqError("Unable to get a free IP"
2591
                                         " from the address pool",
2592
                                         errors.ECODE_STATE)
2593
            self.LogInfo("Chose IP %s from network %s",
2594
                         new_ip,
2595
                         new_net_obj.name)
2596
            params[constants.INIC_IP] = new_ip
2597
          else:
2598
            raise errors.OpPrereqError("ip=pool, but no network found",
2599
                                       errors.ECODE_INVAL)
        # Reserve the new IP in the new network, if any
        elif new_net_uuid:
          try:
            self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId())
            self.LogInfo("Reserving IP %s in network %s",
                         new_ip, new_net_obj.name)
          except errors.ReservationError:
            raise errors.OpPrereqError("IP %s not available in network %s" %
                                       (new_ip, new_net_obj.name),
                                       errors.ECODE_NOTUNIQUE)
        # new network is None so check if new IP is a conflicting IP
        elif self.op.conflicts_check:
          _CheckForConflictingIp(self, new_ip, pnode_uuid)

      # release old IP if old network is not None
      if old_ip and old_net_uuid:
        try:
          self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId())
        except errors.AddressPoolError:
          logging.warning("Release IP %s not contained in network %s",
                          old_ip, old_net_obj.name)

    # there are no changes in (ip, network) tuple and old network is not None
    elif (old_net_uuid is not None and
          (req_link is not None or req_mode is not None)):
      raise errors.OpPrereqError("Not allowed to change link or mode of"
                                 " a NIC that is connected to a network",
                                 errors.ECODE_INVAL)

    private.params = new_params
    private.filled = new_filled_params

  def _PreCheckDiskTemplate(self, pnode_info):
    """CheckPrereq checks related to a new disk template."""
    # Arguments are passed to avoid configuration lookups
    pnode_uuid = self.instance.primary_node
    if self.instance.disk_template == self.op.disk_template:
      raise errors.OpPrereqError("Instance already has disk template %s" %
                                 self.instance.disk_template,
                                 errors.ECODE_INVAL)

    if not self.cluster.IsDiskTemplateEnabled(self.instance.disk_template):
      raise errors.OpPrereqError("Disk template '%s' is not enabled for this"
                                 " cluster." % self.instance.disk_template)

    if (self.instance.disk_template,
        self.op.disk_template) not in self._DISK_CONVERSIONS:
      raise errors.OpPrereqError("Unsupported disk template conversion from"
                                 " %s to %s" % (self.instance.disk_template,
                                                self.op.disk_template),
                                 errors.ECODE_INVAL)
    CheckInstanceState(self, self.instance, INSTANCE_DOWN,
                       msg="cannot change disk template")
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.remote_node_uuid == pnode_uuid:
        raise errors.OpPrereqError("Given new secondary node %s is the same"
                                   " as the primary node of the instance" %
                                   self.op.remote_node, errors.ECODE_STATE)
      CheckNodeOnline(self, self.op.remote_node_uuid)
      CheckNodeNotDrained(self, self.op.remote_node_uuid)
      # FIXME: here we assume that the old instance type is DT_PLAIN
      assert self.instance.disk_template == constants.DT_PLAIN
      disks = [{constants.IDISK_SIZE: d.size,
                constants.IDISK_VG: d.logical_id[0]}
               for d in self.instance.disks]
      required = ComputeDiskSizePerVG(self.op.disk_template, disks)
      CheckNodesFreeDiskPerVG(self, [self.op.remote_node_uuid], required)

      snode_info = self.cfg.GetNodeInfo(self.op.remote_node_uuid)
      snode_group = self.cfg.GetNodeGroup(snode_info.group)
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
                                                              snode_group)
      CheckTargetNodeIPolicy(self, ipolicy, self.instance, snode_info, self.cfg,
                             ignore=self.op.ignore_ipolicy)
      if pnode_info.group != snode_info.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    if self.op.disk_template not in constants.DTS_EXCL_STORAGE:
      # Make sure none of the nodes require exclusive storage
      nodes = [pnode_info]
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        assert snode_info
        nodes.append(snode_info)
      has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
      if compat.any(map(has_es, nodes)):
        errmsg = ("Cannot convert disk template from %s to %s when exclusive"
                  " storage is enabled" % (self.instance.disk_template,
                                           self.op.disk_template))
        raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)

  def _PreCheckDisks(self, ispec):
    """CheckPrereq checks related to disk changes.

    @type ispec: dict
    @param ispec: instance specs to be updated with the new disks

    """
    self.diskparams = self.cfg.GetInstanceDiskParams(self.instance)

    excl_stor = compat.any(
      rpc.GetExclusiveStorageForNodes(self.cfg,
                                      self.instance.all_nodes).values()
      )

    # Check disk modifications. This is done here and not in CheckArguments
    # (as with NICs), because we need to know the instance's disk template
    ver_fn = lambda op, par: self._VerifyDiskModification(op, par, excl_stor)
    if self.instance.disk_template == constants.DT_EXT:
      self._CheckMods("disk", self.op.disks, {}, ver_fn)
    else:
      self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
                      ver_fn)

    self.diskmod = _PrepareContainerMods(self.op.disks, None)

    # Check the validity of the `provider' parameter
    if self.instance.disk_template == constants.DT_EXT:
      for mod in self.diskmod:
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
        if mod[0] == constants.DDM_ADD:
          if ext_provider is None:
            raise errors.OpPrereqError("Instance template is '%s' and parameter"
                                       " '%s' missing, during disk add" %
                                       (constants.DT_EXT,
                                        constants.IDISK_PROVIDER),
                                       errors.ECODE_NOENT)
        elif mod[0] == constants.DDM_MODIFY:
          if ext_provider:
            raise errors.OpPrereqError("Parameter '%s' is invalid during disk"
                                       " modification" %
                                       constants.IDISK_PROVIDER,
                                       errors.ECODE_INVAL)
    else:
      for mod in self.diskmod:
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
        if ext_provider is not None:
          raise errors.OpPrereqError("Parameter '%s' is only valid for"
                                     " instances of type '%s'" %
                                     (constants.IDISK_PROVIDER,
                                      constants.DT_EXT),
                                     errors.ECODE_INVAL)

    if self.op.disks and self.instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Disk operations not supported for"
                                 " diskless instances", errors.ECODE_INVAL)

    def _PrepareDiskMod(_, disk, params, __):
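      """Applies the requested name to a copied disk, for validation only."""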
      disk.name = params.get(constants.IDISK_NAME, None)

    # Verify disk changes (operating on a copy)
    disks = copy.deepcopy(self.instance.disks)
    _ApplyContainerMods("disk", disks, None, self.diskmod, None,
                        _PrepareDiskMod, None)
    utils.ValidateDeviceNames("disk", disks)
    if len(disks) > constants.MAX_DISKS:
      raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
                                 " more" % constants.MAX_DISKS,
                                 errors.ECODE_STATE)
    disk_sizes = [disk.size for disk in self.instance.disks]
    disk_sizes.extend(params["size"] for (op, idx, params, private) in
                      self.diskmod if op == constants.DDM_ADD)
    ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
    ispec[constants.ISPEC_DISK_SIZE] = disk_sizes

    if self.op.offline is not None and self.op.offline:
      CheckInstanceState(self, self.instance, CAN_CHANGE_INSTANCE_OFFLINE,
                         msg="can't change to offline")

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the requested disk, NIC, hypervisor, backend and OS
    parameter changes against the current cluster and instance state.

    """
    assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    self.cluster = self.cfg.GetClusterInfo()

    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    pnode_uuid = self.instance.primary_node

    self.warn = []

    if (self.op.pnode_uuid is not None and self.op.pnode_uuid != pnode_uuid and
        not self.op.force):
      # verify that the instance is not up
      instance_info = self.rpc.call_instance_info(
          pnode_uuid, self.instance.name, self.instance.hypervisor,
          self.instance.hvparams)
      if instance_info.fail_msg:
        self.warn.append("Can't get instance runtime information: %s" %
                         instance_info.fail_msg)
      elif instance_info.payload:
        raise errors.OpPrereqError("Instance is still running on %s" %
                                   self.cfg.GetNodeName(pnode_uuid),
                                   errors.ECODE_STATE)

    assert pnode_uuid in self.owned_locks(locking.LEVEL_NODE)
    node_uuids = list(self.instance.all_nodes)
    pnode_info = self.cfg.GetNodeInfo(pnode_uuid)

    #_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
    assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
    group_info = self.cfg.GetNodeGroup(pnode_info.group)

    # dictionary with instance information after the modification
    ispec = {}

    # Prepare NIC modifications
    self.nicmod = _PrepareContainerMods(self.op.nics, _InstNicModPrivate)

    # OS change
    if self.op.os_name and not self.op.force:
      CheckNodeHasOS(self, self.instance.primary_node, self.op.os_name,
                     self.op.force_variant)
      instance_os = self.op.os_name
    else:
      instance_os = self.instance.os

    assert not (self.op.disk_template and self.op.disks), \
      "Can't modify disk template and apply disk changes at the same time"

    if self.op.disk_template:
      self._PreCheckDiskTemplate(pnode_info)

    self._PreCheckDisks(ispec)

    # hvparams processing
    if self.op.hvparams:
      hv_type = self.instance.hypervisor
      i_hvdict = GetUpdatedParams(self.instance.hvparams, self.op.hvparams)
      utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
      hv_new = self.cluster.SimpleFillHV(hv_type, self.instance.os, i_hvdict)

      # local check
      hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
      CheckHVParams(self, node_uuids, self.instance.hypervisor, hv_new)
      self.hv_proposed = self.hv_new = hv_new # the new actual values
      self.hv_inst = i_hvdict # the new dict (without defaults)
    else:
      self.hv_proposed = self.cluster.SimpleFillHV(self.instance.hypervisor,
                                                   self.instance.os,
                                                   self.instance.hvparams)
      self.hv_new = self.hv_inst = {}

    # beparams processing
    if self.op.beparams:
      i_bedict = GetUpdatedParams(self.instance.beparams, self.op.beparams,
                                  use_none=True)
      objects.UpgradeBeParams(i_bedict)
      utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
      be_new = self.cluster.SimpleFillBE(i_bedict)
      self.be_proposed = self.be_new = be_new # the new actual values
      self.be_inst = i_bedict # the new dict (without defaults)
    else:
      self.be_new = self.be_inst = {}
      self.be_proposed = self.cluster.SimpleFillBE(self.instance.beparams)
    be_old = self.cluster.FillBE(self.instance)

    # CPU param validation -- checking every time a parameter is
    # changed to cover all cases where either CPU mask or vcpus have
    # changed
    if (constants.BE_VCPUS in self.be_proposed and
        constants.HV_CPU_MASK in self.hv_proposed):
      cpu_list = \
        utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
      # Verify mask is consistent with number of vCPUs. Can skip this
      # test if only 1 entry in the CPU mask, which means same mask
      # is applied to all vCPUs.
      if (len(cpu_list) > 1 and
          len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
        raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
                                   " CPU mask [%s]" %
                                   (self.be_proposed[constants.BE_VCPUS],
                                    self.hv_proposed[constants.HV_CPU_MASK]),
                                   errors.ECODE_INVAL)

      # Only perform this test if a new CPU mask is given
      if constants.HV_CPU_MASK in self.hv_new:
        # Calculate the largest CPU number requested
        max_requested_cpu = max(map(max, cpu_list))
        # Check that all of the instance's nodes have enough physical CPUs to
        # satisfy the requested CPU mask
        hvspecs = [(self.instance.hypervisor,
                    self.cfg.GetClusterInfo()
                      .hvparams[self.instance.hypervisor])]
        _CheckNodesPhysicalCPUs(self, self.instance.all_nodes,
                                max_requested_cpu + 1,
                                hvspecs)

    # osparams processing
    if self.op.osparams:
      i_osdict = GetUpdatedParams(self.instance.osparams, self.op.osparams)
      CheckOSParams(self, True, node_uuids, instance_os, i_osdict)
      self.os_inst = i_osdict # the new dict (without defaults)
    else:
      self.os_inst = {}

    #TODO(dynmem): do the appropriate check involving MINMEM
    if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
        be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
      mem_check_list = [pnode_uuid]
      if be_new[constants.BE_AUTO_BALANCE]:
        # either we changed auto_balance to yes or it was from before
        mem_check_list.extend(self.instance.secondary_nodes)
      instance_info = self.rpc.call_instance_info(
          pnode_uuid, self.instance.name, self.instance.hypervisor,
          self.instance.hvparams)
      hvspecs = [(self.instance.hypervisor,
                  self.cluster.hvparams[self.instance.hypervisor])]
      nodeinfo = self.rpc.call_node_info(mem_check_list, None,
                                         hvspecs)
      pninfo = nodeinfo[pnode_uuid]
      msg = pninfo.fail_msg
      if msg:
        # Assume the primary node is unreachable and go ahead
        self.warn.append("Can't get info from primary node %s: %s" %
                         (self.cfg.GetNodeName(pnode_uuid), msg))
      else:
        (_, _, (pnhvinfo, )) = pninfo.payload
        if not isinstance(pnhvinfo.get("memory_free", None), int):
          self.warn.append("Node data from primary node %s doesn't contain"
                           " free memory information" %
                           self.cfg.GetNodeName(pnode_uuid))
        elif instance_info.fail_msg:
          self.warn.append("Can't get instance runtime information: %s" %
                           instance_info.fail_msg)
        else:
          if instance_info.payload:
            current_mem = int(instance_info.payload["memory"])
          else:
            # Assume instance not running
            # (there is a slight race condition here, but it's not very
            # probable, and we have no other way to check)
            # TODO: Describe race condition
            current_mem = 0
          #TODO(dynmem): do the appropriate check involving MINMEM
          miss_mem = (be_new[constants.BE_MAXMEM] - current_mem -
                      pnhvinfo["memory_free"])
          if miss_mem > 0:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from starting, due to %d MB of memory"
                                       " missing on its primary node" %
                                       miss_mem, errors.ECODE_NORES)

      if be_new[constants.BE_AUTO_BALANCE]:
        for node_uuid, nres in nodeinfo.items():
          if node_uuid not in self.instance.secondary_nodes:
            continue
          nres.Raise("Can't get info from secondary node %s" %
                     self.cfg.GetNodeName(node_uuid), prereq=True,
                     ecode=errors.ECODE_STATE)
          (_, _, (nhvinfo, )) = nres.payload
          if not isinstance(nhvinfo.get("memory_free", None), int):
            raise errors.OpPrereqError("Secondary node %s didn't return free"
                                       " memory information" %
                                       self.cfg.GetNodeName(node_uuid),
                                       errors.ECODE_STATE)
          #TODO(dynmem): do the appropriate check involving MINMEM
          elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from failing over to its secondary"
                                       " node %s, due to not enough memory" %
                                       self.cfg.GetNodeName(node_uuid),
                                       errors.ECODE_STATE)

    if self.op.runtime_mem:
      remote_info = self.rpc.call_instance_info(
         self.instance.primary_node, self.instance.name,
         self.instance.hypervisor,
         self.cluster.hvparams[self.instance.hypervisor])
      remote_info.Raise("Error checking node %s" %
                        self.cfg.GetNodeName(self.instance.primary_node))
      if not remote_info.payload: # not running already
        raise errors.OpPrereqError("Instance %s is not running" %
                                   self.instance.name, errors.ECODE_STATE)

      current_memory = remote_info.payload["memory"]
      if (not self.op.force and
           (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
            self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
        raise errors.OpPrereqError("Instance %s must have between %d and %d"
                                   " MB of memory unless --force is given" %
                                   (self.instance.name,
                                    self.be_proposed[constants.BE_MINMEM],
                                    self.be_proposed[constants.BE_MAXMEM]),
                                   errors.ECODE_INVAL)

      delta = self.op.runtime_mem - current_memory
      if delta > 0:
        CheckNodeFreeMemory(
            self, self.instance.primary_node,
            "ballooning memory for instance %s" % self.instance.name, delta,
            self.instance.hypervisor,
            self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])

    # make self.cluster visible in the functions below
    cluster = self.cluster
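    # The helpers below are passed to _ApplyContainerMods: they validate NIC
    # additions and modifications and release the IPs of NICs to be removed.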

    def _PrepareNicCreate(_, params, private):
      self._PrepareNicModification(params, private, None, None,
                                   {}, cluster, pnode_uuid)
      return (None, None)

    def _PrepareNicMod(_, nic, params, private):
      self._PrepareNicModification(params, private, nic.ip, nic.network,
                                   nic.nicparams, cluster, pnode_uuid)
      return None

    def _PrepareNicRemove(_, params, __):
      ip = params.ip
      net = params.network
      if net is not None and ip is not None:
        self.cfg.ReleaseIp(net, ip, self.proc.GetECId())

    # Verify NIC changes (operating on copy)
    nics = self.instance.nics[:]
    _ApplyContainerMods("NIC", nics, None, self.nicmod,
                        _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
    if len(nics) > constants.MAX_NICS:
      raise errors.OpPrereqError("Instance has too many network interfaces"
                                 " (%d), cannot add more" % constants.MAX_NICS,
                                 errors.ECODE_STATE)

    # Pre-compute NIC changes (necessary to use result in hooks)
    self._nic_chgdesc = []
    if self.nicmod:
      # Operate on copies as this is still in prereq
      nics = [nic.Copy() for nic in self.instance.nics]
      _ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
                          self._CreateNewNic, self._ApplyNicMods,
                          self._RemoveNic)
      # Verify that NIC names are unique and valid
      utils.ValidateDeviceNames("NIC", nics)
      self._new_nics = nics
      ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
    else:
      self._new_nics = None
      ispec[constants.ISPEC_NIC_COUNT] = len(self.instance.nics)

    if not self.op.ignore_ipolicy:
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
                                                              group_info)

      # Fill ispec with backend parameters
      ispec[constants.ISPEC_SPINDLE_USE] = \
        self.be_new.get(constants.BE_SPINDLE_USE, None)
      ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS,
                                                         None)

      # Copy ispec to verify parameters with min/max values separately
      if self.op.disk_template:
        new_disk_template = self.op.disk_template
      else:
        new_disk_template = self.instance.disk_template
      ispec_max = ispec.copy()
      ispec_max[constants.ISPEC_MEM_SIZE] = \
        self.be_new.get(constants.BE_MAXMEM, None)
      res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,
                                                     new_disk_template)
      ispec_min = ispec.copy()
      ispec_min[constants.ISPEC_MEM_SIZE] = \
        self.be_new.get(constants.BE_MINMEM, None)
      res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,
                                                     new_disk_template)

      if (res_max or res_min):
        # FIXME: Improve error message by including information about whether
        # the upper or lower limit of the parameter fails the ipolicy.
        msg = ("Instance allocation to group %s (%s) violates policy: %s" %
               (group_info, group_info.name,
                utils.CommaJoin(set(res_max + res_min))))
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

  def _ConvertPlainToDrbd(self, feedback_fn):
    """Converts an instance from plain to drbd.

    """
    feedback_fn("Converting template to drbd")
    pnode_uuid = self.instance.primary_node
    snode_uuid = self.op.remote_node_uuid

    assert self.instance.disk_template == constants.DT_PLAIN

    # create a fake disk info for _GenerateDiskTemplate
    disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
                  constants.IDISK_VG: d.logical_id[0],
                  constants.IDISK_NAME: d.name}
                 for d in self.instance.disks]
    new_disks = GenerateDiskTemplate(self, self.op.disk_template,
                                     self.instance.uuid, pnode_uuid,
                                     [snode_uuid], disk_info, None, None, 0,
                                     feedback_fn, self.diskparams)
    anno_disks = rpc.AnnotateDiskParams(constants.DT_DRBD8, new_disks,
                                        self.diskparams)
    p_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, pnode_uuid)
    s_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, snode_uuid)
    info = GetInstanceInfoText(self.instance)
    feedback_fn("Creating additional volumes...")
    # first, create the missing data and meta devices
    for disk in anno_disks:
      # unfortunately this is... not too nice
      CreateSingleBlockDev(self, pnode_uuid, self.instance, disk.children[1],
                           info, True, p_excl_stor)
      for child in disk.children:
        CreateSingleBlockDev(self, snode_uuid, self.instance, child, info, True,
                             s_excl_stor)
    # at this stage, all new LVs have been created, we can rename the
    # old ones
    feedback_fn("Renaming original volumes...")
    rename_list = [(o, n.children[0].logical_id)
                   for (o, n) in zip(self.instance.disks, new_disks)]
    result = self.rpc.call_blockdev_rename(pnode_uuid, rename_list)
    result.Raise("Failed to rename original LVs")

    feedback_fn("Initializing DRBD devices...")
    # all child devices are in place, we can now create the DRBD devices
    try:
      for disk in anno_disks:
        for (node_uuid, excl_stor) in [(pnode_uuid, p_excl_stor),
                                       (snode_uuid, s_excl_stor)]:
          f_create = node_uuid == pnode_uuid
          CreateSingleBlockDev(self, node_uuid, self.instance, disk, info,
                               f_create, excl_stor)
    except errors.GenericError, e:
      feedback_fn("Initializing of DRBD devices failed;"
                  " renaming back original volumes...")
      for disk in new_disks:
        self.cfg.SetDiskID(disk, pnode_uuid)
      rename_back_list = [(n.children[0], o.logical_id)
                          for (n, o) in zip(new_disks, self.instance.disks)]
      result = self.rpc.call_blockdev_rename(pnode_uuid, rename_back_list)
      result.Raise("Failed to rename LVs back after error %s" % str(e))
      raise

    # at this point, the instance has been modified
    self.instance.disk_template = constants.DT_DRBD8
    self.instance.disks = new_disks
    self.cfg.Update(self.instance, feedback_fn)

    # Release node locks while waiting for sync
    ReleaseLocks(self, locking.LEVEL_NODE)

    # disks are created, waiting for sync
    disk_abort = not WaitForSync(self, self.instance,
                                 oneshot=not self.op.wait_for_sync)
    if disk_abort:
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance, please cleanup manually")

    # Node resource locks will be released by caller

  def _ConvertDrbdToPlain(self, feedback_fn):
    """Converts an instance from drbd to plain.

    """
    assert len(self.instance.secondary_nodes) == 1
    assert self.instance.disk_template == constants.DT_DRBD8

    pnode_uuid = self.instance.primary_node
    snode_uuid = self.instance.secondary_nodes[0]
    feedback_fn("Converting template to plain")

    old_disks = AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
    new_disks = [d.children[0] for d in self.instance.disks]

    # copy over size, mode and name
    for parent, child in zip(old_disks, new_disks):
      child.size = parent.size
      child.mode = parent.mode
      child.name = parent.name

    # this is a DRBD disk, return its port to the pool
    # NOTE: this must be done right before the call to cfg.Update!
    for disk in old_disks:
      tcp_port = disk.logical_id[2]
      self.cfg.AddTcpUdpPort(tcp_port)

    # update instance structure
    self.instance.disks = new_disks
    self.instance.disk_template = constants.DT_PLAIN
    _UpdateIvNames(0, self.instance.disks)
    self.cfg.Update(self.instance, feedback_fn)

    # Release locks in case removing disks takes a while
    ReleaseLocks(self, locking.LEVEL_NODE)

    feedback_fn("Removing volumes on the secondary node...")
    for disk in old_disks:
      self.cfg.SetDiskID(disk, snode_uuid)
      msg = self.rpc.call_blockdev_remove(snode_uuid, disk).fail_msg
      if msg:
        self.LogWarning("Could not remove block device %s on node %s,"
                        " continuing anyway: %s", disk.iv_name,
                        self.cfg.GetNodeName(snode_uuid), msg)

    feedback_fn("Removing unneeded volumes on the primary node...")
    for idx, disk in enumerate(old_disks):
      meta = disk.children[1]
      self.cfg.SetDiskID(meta, pnode_uuid)
      msg = self.rpc.call_blockdev_remove(pnode_uuid, meta).fail_msg
      if msg:
        self.LogWarning("Could not remove metadata for disk %d on node %s,"
                        " continuing anyway: %s", idx,
                        self.cfg.GetNodeName(pnode_uuid), msg)

  def _HotplugDevice(self, action, dev_type, device, extra, seq):
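    """Performs the hotplug RPC for a single device on the primary node.

    Failures reported by the node are raised as execution errors.

    """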
    self.LogInfo("Trying to hotplug device...")
    result = self.rpc.call_hotplug_device(self.instance.primary_node,
                                          self.instance, action, dev_type,
                                          device, extra, seq)
    result.Raise("Could not hotplug device.")
    self.LogInfo("Hotplug done.")

  def _CreateNewDisk(self, idx, params, _):
    """Creates a new disk.

    """
    # add a new disk
    if self.instance.disk_template in constants.DTS_FILEBASED:
      (file_driver, file_path) = self.instance.disks[0].logical_id
      file_path = os.path.dirname(file_path)
    else:
      file_driver = file_path = None

    disk = \
      GenerateDiskTemplate(self, self.instance.disk_template,
                           self.instance.uuid, self.instance.primary_node,
                           self.instance.secondary_nodes, [params], file_path,
                           file_driver, idx, self.Log, self.diskparams)[0]

    new_disks = CreateDisks(self, self.instance, disks=[disk])

    if self.cluster.prealloc_wipe_disks:
      # Wipe new disk
      WipeOrCleanupDisks(self, self.instance,
                         disks=[(idx, disk, 0)],
                         cleanup=new_disks)

    if self.op.hotplug:
      _, device_info = AssembleInstanceDisks(self, self.instance,
                                             [disk], check=False)
      _, _, dev_path = device_info[0]
      self._HotplugDevice(constants.HOTPLUG_ADD, constants.HOTPLUG_DISK,
                          disk, dev_path, idx)

    return (disk, [
      ("disk/%d" % idx, "add:size=%s,mode=%s" % (disk.size, disk.mode)),
      ])

  @staticmethod
  def _ModifyDisk(idx, disk, params, _):
    """Modifies a disk.

    """
    changes = []
    mode = params.get(constants.IDISK_MODE, None)
    if mode:
      disk.mode = mode
      changes.append(("disk.mode/%d" % idx, disk.mode))

    # Only touch the name when one was actually requested; otherwise a
    # mode-only modification would silently clear it
    if constants.IDISK_NAME in params:
      disk.name = params[constants.IDISK_NAME]
      changes.append(("disk.name/%d" % idx, disk.name))

    return changes

  def _RemoveDisk(self, idx, root, _):
    """Removes a disk.

    """
    if self.op.hotplug and _DeviceHotplugable(root):
      self._HotplugDevice(constants.HOTPLUG_REMOVE, constants.HOTPLUG_DISK,
                          root, None, idx)
      ShutdownInstanceDisks(self, self.instance, [root])

    (anno_disk,) = AnnotateDiskParams(self.instance, [root], self.cfg)
    for node_uuid, disk in anno_disk.ComputeNodeTree(
                             self.instance.primary_node):
      self.cfg.SetDiskID(disk, node_uuid)
      msg = self.rpc.call_blockdev_remove(node_uuid, disk).fail_msg
      if msg:
        self.LogWarning("Could not remove disk/%d on node '%s': %s,"
                        " continuing anyway", idx,
                        self.cfg.GetNodeName(node_uuid), msg)

    # if this is a DRBD disk, return its port to the pool
    if root.dev_type in constants.LDS_DRBD:
      self.cfg.AddTcpUdpPort(root.logical_id[2])

  def _CreateNewNic(self, idx, params, private):
    """Creates data structure for a new network interface.

    """
    mac = params[constants.INIC_MAC]
    ip = params.get(constants.INIC_IP, None)
    net = params.get(constants.INIC_NETWORK, None)
    name = params.get(constants.INIC_NAME, None)
    net_uuid = self.cfg.LookupNetwork(net)
    #TODO: not private.filled?? can a nic have no nicparams??
    nicparams = private.filled
    nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name,
                       nicparams=nicparams)
    nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())

    if self.op.hotplug:
      self._HotplugDevice(constants.HOTPLUG_ADD, constants.HOTPLUG_NIC,
                          nobj, None, idx)

    desc = [
      ("nic.%d" % idx,
       "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
       (mac, ip, private.filled[constants.NIC_MODE],
       private.filled[constants.NIC_LINK], net)),
      ]

    return (nobj, desc)

  def _ApplyNicMods(self, idx, nic, params, private):
    """Modifies a network interface.

    """
    changes = []

    for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:
      if key in params:
        changes.append(("nic.%s/%d" % (key, idx), params[key]))
        setattr(nic, key, params[key])

    new_net = params.get(constants.INIC_NETWORK, nic.network)
    new_net_uuid = self.cfg.LookupNetwork(new_net)
    if new_net_uuid != nic.network:
      changes.append(("nic.network/%d" % idx, new_net))
      nic.network = new_net_uuid

    if private.filled:
      nic.nicparams = private.filled

      for (key, val) in nic.nicparams.items():
        changes.append(("nic.%s/%d" % (key, idx), val))
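
    # Re-plug the NIC so that the modified parameters take effect on the
    # running instance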
    if self.op.hotplug and _DeviceHotplugable(nic):
      self._HotplugDevice(constants.HOTPLUG_REMOVE, constants.HOTPLUG_NIC,
                          nic, None, idx)
      self._HotplugDevice(constants.HOTPLUG_ADD, constants.HOTPLUG_NIC,
                          nic, None, idx)

    return changes

  def _RemoveNic(self, idx, nic, _):
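    """Removes a NIC from the instance.

    With hotplug enabled, the device is also unplugged from the running
    instance.

    """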
    if self.op.hotplug and _DeviceHotplugable(nic):
      self._HotplugDevice(constants.HOTPLUG_REMOVE, constants.HOTPLUG_NIC,
                          nic, None, idx)

  def Exec(self, feedback_fn):
    """Modifies an instance.

    All parameters take effect only at the next restart of the instance.

    """
    # Process here the warnings from CheckPrereq, as we don't have a
    # feedback_fn there.
    # TODO: Replace with self.LogWarning
    for warn in self.warn:
      feedback_fn("WARNING: %s" % warn)

    assert ((self.op.disk_template is None) ^
            bool(self.owned_locks(locking.LEVEL_NODE_RES))), \
      "Not owning any node resource locks"

    result = []

    # New primary node
    if self.op.pnode_uuid:
      self.instance.primary_node = self.op.pnode_uuid

    # runtime memory
    if self.op.runtime_mem:
      rpcres = self.rpc.call_instance_balloon_memory(self.instance.primary_node,
                                                     self.instance,
                                                     self.op.runtime_mem)
      rpcres.Raise("Cannot modify instance runtime memory")
      result.append(("runtime_memory", self.op.runtime_mem))

    # Apply disk changes
    _ApplyContainerMods("disk", self.instance.disks, result, self.diskmod,
                        self._CreateNewDisk, self._ModifyDisk,
                        self._RemoveDisk)
    _UpdateIvNames(0, self.instance.disks)

    if self.op.disk_template:
      if __debug__:
        check_nodes = set(self.instance.all_nodes)
        if self.op.remote_node_uuid:
          check_nodes.add(self.op.remote_node_uuid)
        for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
          owned = self.owned_locks(level)
          assert not (check_nodes - owned), \
            ("Not owning the correct locks, owning %r, expected at least %r" %
             (owned, check_nodes))

      r_shut = ShutdownInstanceDisks(self, self.instance)
      if not r_shut:
        raise errors.OpExecError("Cannot shutdown instance disks, unable to"
                                 " proceed with disk template conversion")
      mode = (self.instance.disk_template, self.op.disk_template)
      try:
        self._DISK_CONVERSIONS[mode](self, feedback_fn)
      except:
        self.cfg.ReleaseDRBDMinors(self.instance.uuid)
        raise
      result.append(("disk_template", self.op.disk_template))

      assert self.instance.disk_template == self.op.disk_template, \
        ("Expected disk template '%s', found '%s'" %
         (self.op.disk_template, self.instance.disk_template))

    # Release node and resource locks if there are any (they might already have
    # been released during disk conversion)
    ReleaseLocks(self, locking.LEVEL_NODE)
    ReleaseLocks(self, locking.LEVEL_NODE_RES)

    # Apply NIC changes
    if self._new_nics is not None:
      self.instance.nics = self._new_nics
      result.extend(self._nic_chgdesc)

    # hvparams changes
    if self.op.hvparams:
      self.instance.hvparams = self.hv_inst
      for key, val in self.op.hvparams.iteritems():
        result.append(("hv/%s" % key, val))

    # beparams changes
    if self.op.beparams:
      self.instance.beparams = self.be_inst
      for key, val in self.op.beparams.iteritems():
        result.append(("be/%s" % key, val))

    # OS change
    if self.op.os_name:
      self.instance.os = self.op.os_name

    # osparams changes
    if self.op.osparams:
      self.instance.osparams = self.os_inst
      for key, val in self.op.osparams.iteritems():
        result.append(("os/%s" % key, val))

    if self.op.offline is None:
      # Ignore
      pass
    elif self.op.offline:
      # Mark instance as offline
      self.cfg.MarkInstanceOffline(self.instance.uuid)
      result.append(("admin_state", constants.ADMINST_OFFLINE))
    else:
      # Mark instance as online, but stopped
      self.cfg.MarkInstanceDown(self.instance.uuid)
      result.append(("admin_state", constants.ADMINST_DOWN))

    self.cfg.Update(self.instance, feedback_fn, self.proc.GetECId())

    assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
                self.owned_locks(locking.LEVEL_NODE)), \
      "All node locks should have been released by now"

    return result
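
  # Maps (current disk template, requested disk template) to the conversion
  # method; only plain <-> drbd conversions are supported.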
  _DISK_CONVERSIONS = {
    (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
    (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
    }


class LUInstanceChangeGroup(LogicalUnit):
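  """Moves an instance to another node group.

  The necessary steps are computed by an iallocator and returned as
  follow-up jobs.

  """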
  HPATH = "instance-change-group"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self.share_locks = ShareAll()

    self.needed_locks = {
      locking.LEVEL_NODEGROUP: [],
      locking.LEVEL_NODE: [],
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
      }

    self._ExpandAndLockInstance()

    if self.op.target_groups:
      self.req_target_uuids = map(self.cfg.LookupNodeGroup,
                                  self.op.target_groups)
    else:
      self.req_target_uuids = None

    self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]

      if self.req_target_uuids:
        lock_groups = set(self.req_target_uuids)

        # Lock all groups used by instance optimistically; this requires going
        # via the node before it's locked, requiring verification later on
        instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
        lock_groups.update(instance_groups)
      else:
        # No target groups, need to lock all of them
        lock_groups = locking.ALL_SET

      self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups

    elif level == locking.LEVEL_NODE:
      if self.req_target_uuids:
        # Lock all nodes used by instances
        self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
        self._LockInstancesNodes()

        # Lock all nodes in all potential target groups
        lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
                       self.cfg.GetInstanceNodeGroups(self.op.instance_uuid))
        member_nodes = [node_uuid
                        for group in lock_groups
                        for node_uuid in self.cfg.GetNodeGroup(group).members]
        self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
      else:
        # Lock all nodes as all groups are potential targets
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
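    """Check prerequisites.

    """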
    owned_instance_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))

    assert (self.req_target_uuids is None or
            owned_groups.issuperset(self.req_target_uuids))
    assert owned_instance_names == set([self.op.instance_name])

    # Get instance information
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)

    # Check if node groups for locked instance are still correct
    assert owned_nodes.issuperset(self.instance.all_nodes), \
      ("Instance %s's nodes changed while we kept the lock" %
       self.op.instance_name)

    inst_groups = CheckInstanceNodeGroups(self.cfg, self.op.instance_uuid,
                                          owned_groups)

    if self.req_target_uuids:
      # User requested specific target groups
      self.target_uuids = frozenset(self.req_target_uuids)
    else:
      # All groups except those used by the instance are potential targets
      self.target_uuids = owned_groups - inst_groups

    conflicting_groups = self.target_uuids & inst_groups
    if conflicting_groups:
      raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
                                 " used by the instance '%s'" %
                                 (utils.CommaJoin(conflicting_groups),
                                  self.op.instance_name),
                                 errors.ECODE_INVAL)

    if not self.target_uuids:
      raise errors.OpPrereqError("There are no possible target groups",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    assert self.target_uuids

    env = {
      "TARGET_GROUPS": " ".join(self.target_uuids),
      }

    env.update(BuildInstanceHookEnvByObject(self, self.instance))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()
    return ([mn], [mn])

  def Exec(self, feedback_fn):
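    """Computes the group change via the iallocator and returns the jobs.

    """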
    instances = list(self.owned_locks(locking.LEVEL_INSTANCE))

    assert instances == [self.op.instance_name], "Instance not locked"

    req = iallocator.IAReqGroupChange(instances=instances,
                                      target_groups=list(self.target_uuids))
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute solution for changing group of"
                                 " instance '%s' using iallocator '%s': %s" %
                                 (self.op.instance_name, self.op.iallocator,
                                  ial.info), errors.ECODE_NORES)

    jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False)

    self.LogInfo("Iallocator returned %s job(s) for changing group of"
                 " instance '%s'", len(jobs), self.op.instance_name)

    return ResultWithJobs(jobs)