#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Logical units dealing with instances."""

import OpenSSL
import copy
import logging
import os

from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import ht
from ganeti import hypervisor
from ganeti import locking
from ganeti.masterd import iallocator
from ganeti import masterd
from ganeti import netutils
from ganeti import objects
from ganeti import pathutils
import ganeti.rpc.node as rpc
from ganeti import utils

from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs

from ganeti.cmdlib.common import INSTANCE_DOWN, \
  INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, CheckNodeOnline, \
  ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
  LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
  IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
  AnnotateDiskParams, GetUpdatedParams, ExpandInstanceUuidAndName, \
  ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeUuidAndName, \
  CheckDiskTemplateEnabled, IsValidDiskAccessModeCombination
from ganeti.cmdlib.instance_storage import CreateDisks, \
  CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, WaitForSync, \
  IsExclusiveStorageEnabledNodeUuid, CreateSingleBlockDev, ComputeDisks, \
  CheckRADOSFreeSpace, ComputeDiskSizePerVG, GenerateDiskTemplate, \
  StartInstanceDisks, ShutdownInstanceDisks, AssembleInstanceDisks, \
  CheckSpindlesExclusiveStorage
from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
  GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
  NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
  ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
  GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
  CheckInstanceBridgesExist, CheckNicsBridgesExist, CheckNodeHasOS

import ganeti.masterd.instance


#: Type description for changes as returned by L{_ApplyContainerMods}'s
#: callbacks
_TApplyContModsCbChanges = \
  ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
    ht.TNonEmptyString,
    ht.TAny,
    ])))
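#: Illustrative example (an assumption for documentation, not taken from the
#: original code): a callback conforming to this type could report its
#: modifications as [("mode", "bridged"), ("link", "br0")].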


def _CheckHostnameSane(lu, name):
  """Ensures that a given hostname resolves to a 'sane' name.

  The given name is required to be a prefix of the resolved hostname,
  to prevent accidental mismatches.

  @param lu: the logical unit on behalf of which we're checking
  @param name: the name we should resolve and check
  @return: the resolved hostname object

  """
  hostname = netutils.GetHostname(name=name)
  if hostname.name != name:
    lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name)
  if not utils.MatchNameComponent(name, [hostname.name]):
    raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
                                " same as given hostname '%s'") %
                               (hostname.name, name), errors.ECODE_INVAL)
  return hostname
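# Illustrative example (assumption, not part of the original module): a given
# name "inst1" that resolves to "inst1.example.com" passes the prefix check
# above, whereas a resolution to "inst01.example.com" raises OpPrereqError.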


def _CheckOpportunisticLocking(op):
  """Generate error if opportunistic locking is not possible.

  """
  if op.opportunistic_locking and not op.iallocator:
    raise errors.OpPrereqError("Opportunistic locking is only available in"
                               " combination with an instance allocator",
                               errors.ECODE_INVAL)


def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_name_whitelist):
  """Wrapper around IAReqInstanceAlloc.

  @param op: The instance opcode
  @param disks: The computed disks
  @param nics: The computed nics
  @param beparams: The fully filled beparams
  @param node_name_whitelist: List of nodes which should appear as online to the
    allocator (unless the node is already marked offline)

  @returns: A filled L{iallocator.IAReqInstanceAlloc}

  """
  spindle_use = beparams[constants.BE_SPINDLE_USE]
  return iallocator.IAReqInstanceAlloc(name=op.instance_name,
                                       disk_template=op.disk_template,
                                       tags=op.tags,
                                       os=op.os_type,
                                       vcpus=beparams[constants.BE_VCPUS],
                                       memory=beparams[constants.BE_MAXMEM],
                                       spindle_use=spindle_use,
                                       disks=disks,
                                       nics=[n.ToDict() for n in nics],
                                       hypervisor=op.hypervisor,
                                       node_whitelist=node_name_whitelist)


def _ComputeFullBeParams(op, cluster):
  """Computes the full beparams.

  @param op: The instance opcode
  @param cluster: The cluster config object

  @return: The fully filled beparams

  """
  default_beparams = cluster.beparams[constants.PP_DEFAULT]
  for param, value in op.beparams.iteritems():
    if value == constants.VALUE_AUTO:
      op.beparams[param] = default_beparams[param]
  objects.UpgradeBeParams(op.beparams)
  utils.ForceDictType(op.beparams, constants.BES_PARAMETER_TYPES)
  return cluster.SimpleFillBE(op.beparams)
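# Illustrative example (assumption, not part of the original module): with
# cluster defaults of {"vcpus": 1, "maxmem": 128} and op.beparams set to
# {"vcpus": "auto"}, the "auto" value is replaced by the cluster default
# before the dict is type-checked and merged via SimpleFillBE.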


def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
  """Computes the nics.

  @param op: The instance opcode
  @param cluster: Cluster configuration object
  @param default_ip: The default ip to assign
  @param cfg: An instance of the configuration object
  @param ec_id: Execution context ID

  @returns: The built up nics

  """
  nics = []
  for nic in op.nics:
    nic_mode_req = nic.get(constants.INIC_MODE, None)
    nic_mode = nic_mode_req
    if nic_mode is None or nic_mode == constants.VALUE_AUTO:
      nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]

    net = nic.get(constants.INIC_NETWORK, None)
    link = nic.get(constants.NIC_LINK, None)
    ip = nic.get(constants.INIC_IP, None)
    vlan = nic.get(constants.INIC_VLAN, None)

    if net is None or net.lower() == constants.VALUE_NONE:
      net = None
    else:
      if nic_mode_req is not None or link is not None:
        raise errors.OpPrereqError("If network is given, no mode or link"
                                   " is allowed to be passed",
                                   errors.ECODE_INVAL)

    # ip validity checks
    if ip is None or ip.lower() == constants.VALUE_NONE:
      nic_ip = None
    elif ip.lower() == constants.VALUE_AUTO:
      if not op.name_check:
        raise errors.OpPrereqError("IP address set to auto but name checks"
                                   " have been skipped",
                                   errors.ECODE_INVAL)
      nic_ip = default_ip
    else:
      # We defer pool operations until later, so that the iallocator has
      # filled in the instance's node(s)
      if ip.lower() == constants.NIC_IP_POOL:
        if net is None:
          raise errors.OpPrereqError("if ip=pool, parameter network"
                                     " must be passed too",
                                     errors.ECODE_INVAL)

      elif not netutils.IPAddress.IsValid(ip):
        raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                   errors.ECODE_INVAL)

      nic_ip = ip

    # TODO: check the ip address for uniqueness
    if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
      raise errors.OpPrereqError("Routed nic mode requires an ip address",
                                 errors.ECODE_INVAL)

    # MAC address verification
    mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
    if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
      mac = utils.NormalizeAndValidateMac(mac)

      try:
        # TODO: We need to factor this out
        cfg.ReserveMAC(mac, ec_id)
      except errors.ReservationError:
        raise errors.OpPrereqError("MAC address %s already in use"
                                   " in cluster" % mac,
                                   errors.ECODE_NOTUNIQUE)

    # Build nic parameters
    nicparams = {}
    if nic_mode_req:
      nicparams[constants.NIC_MODE] = nic_mode
    if link:
      nicparams[constants.NIC_LINK] = link
    if vlan:
      nicparams[constants.NIC_VLAN] = vlan

    check_params = cluster.SimpleFillNIC(nicparams)
    objects.NIC.CheckParameterSyntax(check_params)
    net_uuid = cfg.LookupNetwork(net)
    name = nic.get(constants.INIC_NAME, None)
    if name is not None and name.lower() == constants.VALUE_NONE:
      name = None
    nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name,
                          network=net_uuid, nicparams=nicparams)
    nic_obj.uuid = cfg.GenerateUniqueID(ec_id)
    nics.append(nic_obj)

  return nics
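# Illustrative example (assumption, not part of the original module): an
# op.nics entry such as {"mode": "bridged", "link": "br0", "mac": "auto"}
# yields a NIC object with ip=None, a freshly reserved UUID and nicparams
# {"mode": "bridged", "link": "br0"}; the actual MAC is generated later.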


def _CheckForConflictingIp(lu, ip, node_uuid):
  """In case of conflicting IP address raise error.

  @type ip: string
  @param ip: IP address
  @type node_uuid: string
  @param node_uuid: node UUID

  """
  (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node_uuid)
  if conf_net is not None:
    raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
                                " network %s, but the target NIC does not." %
                                (ip, conf_net)),
                               errors.ECODE_STATE)

  return (None, None)


def _ComputeIPolicyInstanceSpecViolation(
  ipolicy, instance_spec, disk_template,
  _compute_fn=ComputeIPolicySpecViolation):
  """Compute if instance specs meet the specs of ipolicy.

  @type ipolicy: dict
  @param ipolicy: The ipolicy to verify against
  @type instance_spec: dict
  @param instance_spec: The instance spec to verify
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{ComputeIPolicySpecViolation}

  """
  mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
  cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
  disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
  disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
  nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
  spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)

  return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
                     disk_sizes, spindle_use, disk_template)
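# Illustrative example (assumption, not part of the original module): for an
# instance_spec of {ISPEC_MEM_SIZE: 512, ISPEC_CPU_COUNT: 2,
# ISPEC_DISK_COUNT: 1, ISPEC_DISK_SIZE: [10240], ISPEC_NIC_COUNT: 1,
# ISPEC_SPINDLE_USE: 1} the helper returns the (possibly empty) list of
# policy violations computed by L{ComputeIPolicySpecViolation}.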


def _CheckOSVariant(os_obj, name):
  """Check whether an OS name conforms to the os variants specification.

  @type os_obj: L{objects.OS}
  @param os_obj: OS object to check
  @type name: string
  @param name: OS name passed by the user, to check for validity

  """
  variant = objects.OS.GetVariant(name)
  if not os_obj.supported_variants:
    if variant:
      raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
                                 " passed)" % (os_obj.name, variant),
                                 errors.ECODE_INVAL)
    return
  if not variant:
    raise errors.OpPrereqError("OS name must include a variant",
                               errors.ECODE_INVAL)

  if variant not in os_obj.supported_variants:
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)
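# Illustrative note (assumption, not part of the original module): variant-aware
# OS names are typically written as "<os>+<variant>", e.g. "debootstrap+default";
# for such a name objects.OS.GetVariant() returns "default".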


class LUInstanceCreate(LogicalUnit):
  """Create an instance.

  """
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def _CheckDiskTemplateValid(self):
    """Checks validity of disk template.

    """
    cluster = self.cfg.GetClusterInfo()
    if self.op.disk_template is None:
      # FIXME: It would be better to take the default disk template from the
      # ipolicy, but for the ipolicy we need the primary node, which we get from
      # the iallocator, which wants the disk template as input. To solve this
      # chicken-and-egg problem, it should be possible to specify just a node
      # group from the iallocator and take the ipolicy from that.
      self.op.disk_template = cluster.enabled_disk_templates[0]
    CheckDiskTemplateEnabled(cluster, self.op.disk_template)

  def _CheckDiskArguments(self):
    """Checks validity of disk-related arguments.

    """
    # check that disk's names are unique and valid
    utils.ValidateDeviceNames("disk", self.op.disks)

    self._CheckDiskTemplateValid()

    # check disks' parameter names and consistent adopt/no-adopt strategy
    has_adopt = has_no_adopt = False
    for disk in self.op.disks:
      if self.op.disk_template != constants.DT_EXT:
        utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
      if constants.IDISK_ADOPT in disk:
        has_adopt = True
      else:
        has_no_adopt = True
    if has_adopt and has_no_adopt:
      raise errors.OpPrereqError("Either all disks are adopted or none is",
                                 errors.ECODE_INVAL)
    if has_adopt:
      if self.op.disk_template not in constants.DTS_MAY_ADOPT:
        raise errors.OpPrereqError("Disk adoption is not supported for the"
                                   " '%s' disk template" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)
      if self.op.iallocator is not None:
        raise errors.OpPrereqError("Disk adoption not allowed with an"
                                   " iallocator script", errors.ECODE_INVAL)
      if self.op.mode == constants.INSTANCE_IMPORT:
        raise errors.OpPrereqError("Disk adoption not allowed for"
                                   " instance import", errors.ECODE_INVAL)
    else:
      if self.op.disk_template in constants.DTS_MUST_ADOPT:
        raise errors.OpPrereqError("Disk template %s requires disk adoption,"
                                   " but no 'adopt' parameter given" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)

    self.adopt_disks = has_adopt
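    # Illustrative example (assumption, not part of the original module): a
    # disk specification using adoption could look like
    # {constants.IDISK_ADOPT: "lv_data", constants.IDISK_VG: "xenvg"}, while a
    # regular specification only carries e.g. {constants.IDISK_SIZE: 10240}.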

  def _CheckVLANArguments(self):
    """Check validity of VLANs if given

    """
    for nic in self.op.nics:
      vlan = nic.get(constants.INIC_VLAN, None)
      if vlan:
        if vlan[0] == ".":
          # vlan starting with dot means single untagged vlan,
          # might be followed by trunk (:)
          if not vlan[1:].isdigit():
            vlanlist = vlan[1:].split(':')
            for vl in vlanlist:
              if not vl.isdigit():
                raise errors.OpPrereqError("Specified VLAN parameter is "
                                           "invalid : %s" % vlan,
                                           errors.ECODE_INVAL)
        elif vlan[0] == ":":
          # Trunk - tagged only
          vlanlist = vlan[1:].split(':')
          for vl in vlanlist:
            if not vl.isdigit():
              raise errors.OpPrereqError("Specified VLAN parameter is invalid"
                                         " : %s" % vlan, errors.ECODE_INVAL)
        elif vlan.isdigit():
          # This is the simplest case. No dots, only single digit
          # -> Create untagged access port, dot needs to be added
          nic[constants.INIC_VLAN] = "." + vlan
        else:
          raise errors.OpPrereqError("Specified VLAN parameter is invalid"
                                     " : %s" % vlan, errors.ECODE_INVAL)
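    # Illustrative examples of the accepted VLAN syntax (assumption, not part
    # of the original module):
    #   "100"      -> normalized above to ".100" (untagged access on VLAN 100)
    #   ".100:200" -> untagged on VLAN 100, additionally tagged on VLAN 200
    #   ":100:200" -> trunk carrying only the tagged VLANs 100 and 200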

  def CheckArguments(self):
    """Check arguments.

    """
    # do not require name_check to ease forward/backward compatibility
    # for tools
    if self.op.no_install and self.op.start:
      self.LogInfo("No-installation mode selected, disabling startup")
      self.op.start = False
    # validate/normalize the instance name
    self.op.instance_name = \
      netutils.Hostname.GetNormalizedName(self.op.instance_name)

    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("Cannot do IP address check without a name"
                                 " check", errors.ECODE_INVAL)

    # check nics' parameter names
    for nic in self.op.nics:
      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
    # check that NIC's parameters names are unique and valid
    utils.ValidateDeviceNames("NIC", self.op.nics)

    self._CheckVLANArguments()

    self._CheckDiskArguments()
    assert self.op.disk_template is not None

    # instance name verification
    if self.op.name_check:
      self.hostname = _CheckHostnameSane(self, self.op.instance_name)
      self.op.instance_name = self.hostname.name
      # used in CheckPrereq for ip ping check
      self.check_ip = self.hostname.ip
    else:
      self.check_ip = None

    # file storage checks
    if (self.op.file_driver and
        not self.op.file_driver in constants.FILE_DRIVER):
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                 self.op.file_driver, errors.ECODE_INVAL)

    # set default file_driver if unset and required
    if (not self.op.file_driver and
        self.op.disk_template in constants.DTS_FILEBASED):
      self.op.file_driver = constants.FD_LOOP

    ### Node/iallocator related checks
    CheckIAllocatorOrNode(self, "iallocator", "pnode")

    if self.op.pnode is not None:
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        if self.op.snode is None:
          raise errors.OpPrereqError("The networked disk templates need"
                                     " a mirror node", errors.ECODE_INVAL)
      elif self.op.snode:
        self.LogWarning("Secondary node will be ignored on non-mirrored disk"
                        " template")
        self.op.snode = None

    _CheckOpportunisticLocking(self.op)

    if self.op.mode == constants.INSTANCE_IMPORT:
      # On import force_variant must be True, because if we forced it at
      # initial install, our only chance when importing it back is that it
      # works again!
      self.op.force_variant = True

      if self.op.no_install:
        self.LogInfo("No-installation mode has no effect during import")

    elif self.op.mode == constants.INSTANCE_CREATE:
      if self.op.os_type is None:
        raise errors.OpPrereqError("No guest OS specified",
                                   errors.ECODE_INVAL)
      if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
        raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
                                   " installation" % self.op.os_type,
                                   errors.ECODE_STATE)
    elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
      self._cds = GetClusterDomainSecret()

      # Check handshake to ensure both clusters have the same domain secret
      src_handshake = self.op.source_handshake
      if not src_handshake:
        raise errors.OpPrereqError("Missing source handshake",
                                   errors.ECODE_INVAL)

      errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
                                                           src_handshake)
      if errmsg:
        raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
                                   errors.ECODE_INVAL)

      # Load and check source CA
      self.source_x509_ca_pem = self.op.source_x509_ca
      if not self.source_x509_ca_pem:
        raise errors.OpPrereqError("Missing source X509 CA",
                                   errors.ECODE_INVAL)

      try:
        (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
                                                    self._cds)
      except OpenSSL.crypto.Error, err:
        raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
                                   (err, ), errors.ECODE_INVAL)

      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
      if errcode is not None:
        raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
                                   errors.ECODE_INVAL)

      self.source_x509_ca = cert

      src_instance_name = self.op.source_instance_name
      if not src_instance_name:
        raise errors.OpPrereqError("Missing source instance name",
                                   errors.ECODE_INVAL)

      self.source_instance_name = \
        netutils.GetHostname(name=src_instance_name).name

    else:
      raise errors.OpPrereqError("Invalid instance creation mode %r" %
                                 self.op.mode, errors.ECODE_INVAL)

  def ExpandNames(self):
    """ExpandNames for CreateInstance.

    Figure out the right locks for instance creation.

    """
    self.needed_locks = {}

    # this is just a preventive check, but someone might still add this
    # instance in the meantime, and creation will fail at lock-add time
    if self.op.instance_name in \
      [inst.name for inst in self.cfg.GetAllInstancesInfo().values()]:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 self.op.instance_name, errors.ECODE_EXISTS)

    self.add_locks[locking.LEVEL_INSTANCE] = self.op.instance_name

    if self.op.iallocator:
      # TODO: Find a solution to not lock all nodes in the cluster, e.g. by
      # specifying a group on instance creation and then selecting nodes from
      # that group
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
      self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET

      if self.op.opportunistic_locking:
        self.opportunistic_locks[locking.LEVEL_NODE] = True
        self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
    else:
      (self.op.pnode_uuid, self.op.pnode) = \
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
      nodelist = [self.op.pnode_uuid]
      if self.op.snode is not None:
        (self.op.snode_uuid, self.op.snode) = \
          ExpandNodeUuidAndName(self.cfg, self.op.snode_uuid, self.op.snode)
        nodelist.append(self.op.snode_uuid)
      self.needed_locks[locking.LEVEL_NODE] = nodelist

    # in case of import lock the source node too
    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = self.op.src_node
      src_path = self.op.src_path

      if src_path is None:
        self.op.src_path = src_path = self.op.instance_name

      if src_node is None:
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
        self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
        self.op.src_node = None
        if os.path.isabs(src_path):
          raise errors.OpPrereqError("Importing an instance from a path"
                                     " requires a source node option",
                                     errors.ECODE_INVAL)
      else:
        (self.op.src_node_uuid, self.op.src_node) = (_, src_node) = \
          ExpandNodeUuidAndName(self.cfg, self.op.src_node_uuid, src_node)
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
          self.needed_locks[locking.LEVEL_NODE].append(self.op.src_node_uuid)
        if not os.path.isabs(src_path):
          self.op.src_path = \
            utils.PathJoin(pathutils.EXPORT_DIR, src_path)

    self.needed_locks[locking.LEVEL_NODE_RES] = \
      CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def _RunAllocator(self):
    """Run the allocator based on input opcode.

    """
    if self.op.opportunistic_locking:
      # Only consider nodes for which a lock is held
      node_name_whitelist = self.cfg.GetNodeNames(
        self.owned_locks(locking.LEVEL_NODE))
    else:
      node_name_whitelist = None

    req = _CreateInstanceAllocRequest(self.op, self.disks,
                                      self.nics, self.be_full,
                                      node_name_whitelist)
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      # When opportunistic locks are used only a temporary failure is generated
      if self.op.opportunistic_locking:
        ecode = errors.ECODE_TEMP_NORES
      else:
        ecode = errors.ECODE_NORES

      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" %
                                 (self.op.iallocator, ial.info),
                                 ecode)

    (self.op.pnode_uuid, self.op.pnode) = \
      ExpandNodeUuidAndName(self.cfg, None, ial.result[0])
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                 self.op.instance_name, self.op.iallocator,
                 utils.CommaJoin(ial.result))

    assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator"

    if req.RequiredNodes() == 2:
      (self.op.snode_uuid, self.op.snode) = \
        ExpandNodeUuidAndName(self.cfg, None, ial.result[1])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "ADD_MODE": self.op.mode,
      }
    if self.op.mode == constants.INSTANCE_IMPORT:
      env["SRC_NODE"] = self.op.src_node
      env["SRC_PATH"] = self.op.src_path
      env["SRC_IMAGES"] = self.src_images

    env.update(BuildInstanceHookEnv(
      name=self.op.instance_name,
      primary_node_name=self.op.pnode,
      secondary_node_names=self.cfg.GetNodeNames(self.secondaries),
      status=self.op.start,
      os_type=self.op.os_type,
      minmem=self.be_full[constants.BE_MINMEM],
      maxmem=self.be_full[constants.BE_MAXMEM],
      vcpus=self.be_full[constants.BE_VCPUS],
      nics=NICListToTuple(self, self.nics),
      disk_template=self.op.disk_template,
      disks=[(d[constants.IDISK_NAME], d.get("uuid", ""),
              d[constants.IDISK_SIZE], d[constants.IDISK_MODE])
             for d in self.disks],
      bep=self.be_full,
      hvp=self.hv_full,
      hypervisor_name=self.op.hypervisor,
      tags=self.op.tags,
      ))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode(), self.op.pnode_uuid] + self.secondaries
    return nl, nl

  def _ReadExportInfo(self):
    """Reads the export information from disk.

    It will override the opcode source node and path with the actual
    information, if these two were not specified before.

    @return: the export information

    """
    assert self.op.mode == constants.INSTANCE_IMPORT

    if self.op.src_node_uuid is None:
      locked_nodes = self.owned_locks(locking.LEVEL_NODE)
      exp_list = self.rpc.call_export_list(locked_nodes)
      found = False
      for node_uuid in exp_list:
        if exp_list[node_uuid].fail_msg:
          continue
        if self.op.src_path in exp_list[node_uuid].payload:
          found = True
          self.op.src_node = self.cfg.GetNodeInfo(node_uuid).name
          self.op.src_node_uuid = node_uuid
          self.op.src_path = utils.PathJoin(pathutils.EXPORT_DIR,
                                            self.op.src_path)
          break
      if not found:
        raise errors.OpPrereqError("No export found for relative path %s" %
                                   self.op.src_path, errors.ECODE_INVAL)

    CheckNodeOnline(self, self.op.src_node_uuid)
    result = self.rpc.call_export_info(self.op.src_node_uuid, self.op.src_path)
    result.Raise("No export or invalid export found in dir %s" %
                 self.op.src_path)

    export_info = objects.SerializableConfigParser.Loads(str(result.payload))
    if not export_info.has_section(constants.INISECT_EXP):
      raise errors.ProgrammerError("Corrupted export config",
                                   errors.ECODE_ENVIRON)

    ei_version = export_info.get(constants.INISECT_EXP, "version")
    if int(ei_version) != constants.EXPORT_VERSION:
      raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                 (ei_version, constants.EXPORT_VERSION),
                                 errors.ECODE_ENVIRON)
    return export_info

  def _ReadExportParams(self, einfo):
    """Use export parameters as defaults.

    In case the opcode doesn't specify (as in override) some instance
    parameters, then try to use them from the export information, if
    that declares them.

    """
    self.op.os_type = einfo.get(constants.INISECT_EXP, "os")

    if not self.op.disks:
      disks = []
      # TODO: import the disk iv_name too
      for idx in range(constants.MAX_DISKS):
        if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
          disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
          disks.append({constants.IDISK_SIZE: disk_sz})
      self.op.disks = disks
      if not disks and self.op.disk_template != constants.DT_DISKLESS:
        raise errors.OpPrereqError("No disk info specified and the export"
                                   " is missing the disk information",
                                   errors.ECODE_INVAL)

    if not self.op.nics:
      nics = []
      for idx in range(constants.MAX_NICS):
        if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
          ndict = {}
          for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
            nic_param_name = "nic%d_%s" % (idx, name)
            if einfo.has_option(constants.INISECT_INS, nic_param_name):
              v = einfo.get(constants.INISECT_INS, nic_param_name)
              ndict[name] = v
          nics.append(ndict)
        else:
          break
      self.op.nics = nics

    if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
      self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()

    if (self.op.hypervisor is None and
        einfo.has_option(constants.INISECT_INS, "hypervisor")):
      self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")

    if einfo.has_section(constants.INISECT_HYP):
      # use the export parameters but do not override the ones
      # specified by the user
      for name, value in einfo.items(constants.INISECT_HYP):
        if name not in self.op.hvparams:
          self.op.hvparams[name] = value

    if einfo.has_section(constants.INISECT_BEP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_BEP):
        if name not in self.op.beparams:
          self.op.beparams[name] = value
        # Compatibility for the old "memory" be param
        if name == constants.BE_MEMORY:
          if constants.BE_MAXMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MAXMEM] = value
          if constants.BE_MINMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MINMEM] = value
    else:
      # try to read the parameters old style, from the main section
      for name in constants.BES_PARAMETERS:
        if (name not in self.op.beparams and
            einfo.has_option(constants.INISECT_INS, name)):
          self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)

    if einfo.has_section(constants.INISECT_OSP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_OSP):
        if name not in self.op.osparams:
          self.op.osparams[name] = value
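    # Illustrative sketch (assumption, not part of the original module) of the
    # export info consumed above, in the ConfigParser format expected by
    # _ReadExportInfo/_ReadExportParams:
    #   [export]
    #   os = debootstrap
    #   [instance]
    #   disk0_size = 10240
    #   nic0_mac = aa:00:00:12:34:56
    #   hypervisor = kvm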

  def _RevertToDefaults(self, cluster):
    """Revert the instance parameters to the default values.

    """
    # hvparams
    hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
    for name in self.op.hvparams.keys():
      if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
        del self.op.hvparams[name]
    # beparams
    be_defs = cluster.SimpleFillBE({})
    for name in self.op.beparams.keys():
      if name in be_defs and be_defs[name] == self.op.beparams[name]:
        del self.op.beparams[name]
    # nic params
    nic_defs = cluster.SimpleFillNIC({})
    for nic in self.op.nics:
      for name in constants.NICS_PARAMETERS:
        if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
          del nic[name]
    # osparams
    os_defs = cluster.SimpleFillOS(self.op.os_type, {})
    for name in self.op.osparams.keys():
      if name in os_defs and os_defs[name] == self.op.osparams[name]:
        del self.op.osparams[name]

  def _CalculateFileStorageDir(self):
    """Calculate final instance file storage dir.

    """
    # file storage dir calculation/check
    self.instance_file_storage_dir = None
    if self.op.disk_template in constants.DTS_FILEBASED:
      # build the full file storage dir path
      joinargs = []

      cfg_storage = None
      if self.op.disk_template == constants.DT_FILE:
        cfg_storage = self.cfg.GetFileStorageDir()
      elif self.op.disk_template == constants.DT_SHARED_FILE:
        cfg_storage = self.cfg.GetSharedFileStorageDir()
      elif self.op.disk_template == constants.DT_GLUSTER:
        cfg_storage = self.cfg.GetGlusterStorageDir()

      if not cfg_storage:
        raise errors.OpPrereqError(
          "Cluster file storage dir for {tpl} storage type not defined".format(
            tpl=repr(self.op.disk_template)
          ),
          errors.ECODE_STATE
        )

      joinargs.append(cfg_storage)

      if self.op.file_storage_dir is not None:
        joinargs.append(self.op.file_storage_dir)

      if self.op.disk_template != constants.DT_GLUSTER:
        joinargs.append(self.op.instance_name)

      if len(joinargs) > 1:
        # pylint: disable=W0142
        self.instance_file_storage_dir = utils.PathJoin(*joinargs)
      else:
        self.instance_file_storage_dir = joinargs[0]
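    # Illustrative example (assumption, not part of the original module): with
    # a cluster file storage dir of "/srv/ganeti/file-storage", no explicit
    # file_storage_dir and a DT_FILE instance named "inst1.example.com", the
    # resulting path is "/srv/ganeti/file-storage/inst1.example.com".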

  def CheckPrereq(self): # pylint: disable=R0914
    """Check prerequisites.

    """
    self._CalculateFileStorageDir()

    if self.op.mode == constants.INSTANCE_IMPORT:
      export_info = self._ReadExportInfo()
      self._ReadExportParams(export_info)
      self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
    else:
      self._old_instance_name = None

    if (not self.cfg.GetVGName() and
        self.op.disk_template not in constants.DTS_NOT_LVM):
      raise errors.OpPrereqError("Cluster does not support lvm-based"
                                 " instances", errors.ECODE_STATE)

    if (self.op.hypervisor is None or
        self.op.hypervisor == constants.VALUE_AUTO):
      self.op.hypervisor = self.cfg.GetHypervisorType()

    cluster = self.cfg.GetClusterInfo()
    enabled_hvs = cluster.enabled_hypervisors
    if self.op.hypervisor not in enabled_hvs:
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
                                 " cluster (%s)" %
                                 (self.op.hypervisor, ",".join(enabled_hvs)),
                                 errors.ECODE_STATE)

    # Check tag validity
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)

    # check hypervisor parameter syntax (locally)
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
    filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
                                      self.op.hvparams)
    hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
    hv_type.CheckParameterSyntax(filled_hvp)
    self.hv_full = filled_hvp
    # check that we don't specify global parameters on an instance
    CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
                         "instance", "cluster")

    # fill and remember the beparams dict
    self.be_full = _ComputeFullBeParams(self.op, cluster)

    # build os parameters
    self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)

    # now that hvp/bep are in final format, let's reset to defaults,
    # if told to do so
    if self.op.identify_defaults:
      self._RevertToDefaults(cluster)

    # NIC buildup
    self.nics = _ComputeNics(self.op, cluster, self.check_ip, self.cfg,
                             self.proc.GetECId())

    # disk checks/pre-build
    default_vg = self.cfg.GetVGName()
    self.disks = ComputeDisks(self.op, default_vg)

    if self.op.mode == constants.INSTANCE_IMPORT:
      disk_images = []
      for idx in range(len(self.disks)):
        option = "disk%d_dump" % idx
        if export_info.has_option(constants.INISECT_INS, option):
          # FIXME: are the old os-es, disk sizes, etc. useful?
          export_name = export_info.get(constants.INISECT_INS, option)
          image = utils.PathJoin(self.op.src_path, export_name)
          disk_images.append(image)
        else:
          disk_images.append(False)

      self.src_images = disk_images

      if self.op.instance_name == self._old_instance_name:
        for idx, nic in enumerate(self.nics):
          if nic.mac == constants.VALUE_AUTO:
            nic_mac_ini = "nic%d_mac" % idx
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)

    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT

    # ip ping checks (we use the same ip that was resolved in ExpandNames)
    if self.op.ip_check:
      if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (self.check_ip, self.op.instance_name),
                                   errors.ECODE_NOTUNIQUE)

    #### mac address generation
    # By generating here the mac address both the allocator and the hooks get
    # the real final mac address rather than the 'auto' or 'generate' value.
    # There is a race condition between the generation and the instance object
    # creation, which means that we know the mac is valid now, but we're not
    # sure it will be when we actually add the instance. If things go bad
    # adding the instance will abort because of a duplicate mac, and the
    # creation job will fail.
    for nic in self.nics:
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId())

    #### allocator run

    if self.op.iallocator is not None:
      self._RunAllocator()

    # Release all unneeded node locks
    keep_locks = filter(None, [self.op.pnode_uuid, self.op.snode_uuid,
                               self.op.src_node_uuid])
    ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES)), \
      "Node locks differ from node resource locks"

    #### node related checks

    # check primary node
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode_uuid)
    assert self.pnode is not None, \
      "Cannot retrieve locked node %s" % self.op.pnode_uuid
    if pnode.offline:
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if pnode.drained:
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if not pnode.vm_capable:
      raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
                                 " '%s'" % pnode.name, errors.ECODE_STATE)

    self.secondaries = []

    # Fill in any IPs from IP pools. This must happen here, because we need to
    # know the nic's primary node, as specified by the iallocator
    for idx, nic in enumerate(self.nics):
      net_uuid = nic.network
      if net_uuid is not None:
        nobj = self.cfg.GetNetwork(net_uuid)
        netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.uuid)
        if netparams is None:
          raise errors.OpPrereqError("No netparams found for network"
                                     " %s. Probably not connected to"
                                     " node's %s nodegroup" %
                                     (nobj.name, self.pnode.name),
                                     errors.ECODE_INVAL)
        self.LogInfo("NIC/%d inherits netparams %s" %
                     (idx, netparams.values()))
        nic.nicparams = dict(netparams)
        if nic.ip is not None:
          if nic.ip.lower() == constants.NIC_IP_POOL:
            try:
              nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
                                         " from the address pool" % idx,
                                         errors.ECODE_STATE)
            self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name)
          else:
            try:
              self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("IP address %s already in use"
                                         " or does not belong to network %s" %
                                         (nic.ip, nobj.name),
                                         errors.ECODE_NOTUNIQUE)

      # net is None, ip None or given
      elif self.op.conflicts_check:
        _CheckForConflictingIp(self, nic.ip, self.pnode.uuid)

    # mirror node verification
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.snode_uuid == pnode.uuid:
        raise errors.OpPrereqError("The secondary node cannot be the"
                                   " primary node", errors.ECODE_INVAL)
      CheckNodeOnline(self, self.op.snode_uuid)
      CheckNodeNotDrained(self, self.op.snode_uuid)
      CheckNodeVmCapable(self, self.op.snode_uuid)
      self.secondaries.append(self.op.snode_uuid)

      snode = self.cfg.GetNodeInfo(self.op.snode_uuid)
      if pnode.group != snode.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    nodes = [pnode]
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      nodes.append(snode)
    has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
    excl_stor = compat.any(map(has_es, nodes))
    if excl_stor and not self.op.disk_template in constants.DTS_EXCL_STORAGE:
      raise errors.OpPrereqError("Disk template %s not supported with"
                                 " exclusive storage" % self.op.disk_template,
                                 errors.ECODE_STATE)
    for disk in self.disks:
      CheckSpindlesExclusiveStorage(disk, excl_stor, True)

    node_uuids = [pnode.uuid] + self.secondaries

    if not self.adopt_disks:
      if self.op.disk_template == constants.DT_RBD:
        # _CheckRADOSFreeSpace() is just a placeholder.
        # Any function that checks prerequisites can be placed here.
        # Check if there is enough space on the RADOS cluster.
        CheckRADOSFreeSpace()
      elif self.op.disk_template == constants.DT_EXT:
        # FIXME: Function that checks prereqs if needed
        pass
      elif self.op.disk_template in constants.DTS_LVM:
        # Check lv size requirements, if not adopting
        req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
        CheckNodesFreeDiskPerVG(self, node_uuids, req_sizes)
      else:
        # FIXME: add checks for other, non-adopting, non-lvm disk templates
        pass

    elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
      all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
                                disk[constants.IDISK_ADOPT])
                     for disk in self.disks])
      if len(all_lvs) != len(self.disks):
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
                                   errors.ECODE_INVAL)
      for lv_name in all_lvs:
        try:
          # FIXME: lv_name here is "vg/lv" need to ensure that other calls
          # to ReserveLV uses the same syntax
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("LV named %s used by another instance" %
                                     lv_name, errors.ECODE_NOTUNIQUE)

      vg_names = self.rpc.call_vg_list([pnode.uuid])[pnode.uuid]
      vg_names.Raise("Cannot get VG information from node %s" % pnode.name)

      node_lvs = self.rpc.call_lv_list([pnode.uuid],
                                       vg_names.payload.keys())[pnode.uuid]
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
      node_lvs = node_lvs.payload

      delta = all_lvs.difference(node_lvs.keys())
      if delta:
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
      if online_lvs:
        raise errors.OpPrereqError("Online logical volumes found, cannot"
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
                                   errors.ECODE_STATE)
      # update the size of disk based on what is found
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
                                        dsk[constants.IDISK_ADOPT])][0]))

    elif self.op.disk_template == constants.DT_BLOCK:
      # Normalize and de-duplicate device paths
      all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
                       for disk in self.disks])
      if len(all_disks) != len(self.disks):
        raise errors.OpPrereqError("Duplicate disk names given for adoption",
                                   errors.ECODE_INVAL)
      baddisks = [d for d in all_disks
                  if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
      if baddisks:
        raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
                                   " cannot be adopted" %
                                   (utils.CommaJoin(baddisks),
                                    constants.ADOPTABLE_BLOCKDEV_ROOT),
                                   errors.ECODE_INVAL)

      node_disks = self.rpc.call_bdev_sizes([pnode.uuid],
                                            list(all_disks))[pnode.uuid]
      node_disks.Raise("Cannot get block device information from node %s" %
                       pnode.name)
      node_disks = node_disks.payload
      delta = all_disks.difference(node_disks.keys())
      if delta:
        raise errors.OpPrereqError("Missing block device(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_disks[dsk[constants.IDISK_ADOPT]]))

    # Check disk access param to be compatible with specified hypervisor
    node_info = self.cfg.GetNodeInfo(self.op.pnode_uuid)
    node_group = self.cfg.GetNodeGroup(node_info.group)
    disk_params = self.cfg.GetGroupDiskParams(node_group)
    access_type = disk_params[self.op.disk_template].get(
      constants.RBD_ACCESS, constants.DISK_KERNELSPACE
    )

    if not IsValidDiskAccessModeCombination(self.op.hypervisor,
                                            self.op.disk_template,
                                            access_type):
      raise errors.OpPrereqError("Selected hypervisor (%s) cannot be"
                                 " used with %s disk access param" %
                                 (self.op.hypervisor, access_type),
                                 errors.ECODE_STATE)

    # Verify instance specs
    spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
    ispec = {
      constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
      constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
      constants.ISPEC_DISK_COUNT: len(self.disks),
      constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE]
                                  for disk in self.disks],
      constants.ISPEC_NIC_COUNT: len(self.nics),
      constants.ISPEC_SPINDLE_USE: spindle_use,
      }

    group_info = self.cfg.GetNodeGroup(pnode.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
    res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
                                               self.op.disk_template)
    if not self.op.ignore_ipolicy and res:
      msg = ("Instance allocation to group %s (%s) violates policy: %s" %
             (pnode.group, group_info.name, utils.CommaJoin(res)))
      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

    CheckHVParams(self, node_uuids, self.op.hypervisor, self.op.hvparams)

    CheckNodeHasOS(self, pnode.uuid, self.op.os_type, self.op.force_variant)
    # check OS parameters (remotely)
    CheckOSParams(self, True, node_uuids, self.op.os_type, self.os_full)

    CheckNicsBridgesExist(self, self.nics, self.pnode.uuid)

    #TODO: _CheckExtParams (remotely)
    # Check parameters for extstorage

    # memory check on primary node
    #TODO(dynmem): use MINMEM for checking
    if self.op.start:
      hvfull = objects.FillDict(cluster.hvparams.get(self.op.hypervisor, {}),
                                self.op.hvparams)
      CheckNodeFreeMemory(self, self.pnode.uuid,
                          "creating instance %s" % self.op.instance_name,
                          self.be_full[constants.BE_MAXMEM],
                          self.op.hypervisor, hvfull)

    self.dry_run_result = list(node_uuids)
1236

    
1237
  def Exec(self, feedback_fn):
1238
    """Create and add the instance to the cluster.
1239

1240
    """
1241
    assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
1242
                self.owned_locks(locking.LEVEL_NODE)), \
1243
      "Node locks differ from node resource locks"
1244
    assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)
1245

    
1246
    ht_kind = self.op.hypervisor
1247
    if ht_kind in constants.HTS_REQ_PORT:
1248
      network_port = self.cfg.AllocatePort()
1249
    else:
1250
      network_port = None
1251

    
1252
    instance_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
1253

    
1254
    # This is ugly, but we have a chicken-and-egg problem here:
1255
    # We can only take the group disk parameters, as the instance
1256
    # has no disks yet (we are generating them right here).
1257
    nodegroup = self.cfg.GetNodeGroup(self.pnode.group)
1258
    disks = GenerateDiskTemplate(self,
1259
                                 self.op.disk_template,
1260
                                 instance_uuid, self.pnode.uuid,
1261
                                 self.secondaries,
1262
                                 self.disks,
1263
                                 self.instance_file_storage_dir,
1264
                                 self.op.file_driver,
1265
                                 0,
1266
                                 feedback_fn,
1267
                                 self.cfg.GetGroupDiskParams(nodegroup))
1268

    
1269
    iobj = objects.Instance(name=self.op.instance_name,
1270
                            uuid=instance_uuid,
1271
                            os=self.op.os_type,
1272
                            primary_node=self.pnode.uuid,
1273
                            nics=self.nics, disks=disks,
1274
                            disk_template=self.op.disk_template,
1275
                            disks_active=False,
1276
                            admin_state=constants.ADMINST_DOWN,
1277
                            network_port=network_port,
1278
                            beparams=self.op.beparams,
1279
                            hvparams=self.op.hvparams,
1280
                            hypervisor=self.op.hypervisor,
1281
                            osparams=self.op.osparams,
1282
                            )
1283

    
1284
    if self.op.tags:
1285
      for tag in self.op.tags:
1286
        iobj.AddTag(tag)
1287

    
1288
    if self.adopt_disks:
1289
      if self.op.disk_template == constants.DT_PLAIN:
1290
        # rename LVs to the newly-generated names; we need to construct
1291
        # 'fake' LV disks with the old data, plus the new unique_id
1292
        tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
1293
        rename_to = []
1294
        for t_dsk, a_dsk in zip(tmp_disks, self.disks):
1295
          rename_to.append(t_dsk.logical_id)
1296
          t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
1297
        result = self.rpc.call_blockdev_rename(self.pnode.uuid,
1298
                                               zip(tmp_disks, rename_to))
1299
        result.Raise("Failed to rename adopted LVs")
1300
    else:
1301
      feedback_fn("* creating instance disks...")
1302
      try:
1303
        CreateDisks(self, iobj)
1304
      except errors.OpExecError:
1305
        self.LogWarning("Device creation failed")
1306
        self.cfg.ReleaseDRBDMinors(self.op.instance_name)
1307
        raise
1308

    
1309
    feedback_fn("adding instance %s to cluster config" % self.op.instance_name)
1310

    
1311
    self.cfg.AddInstance(iobj, self.proc.GetECId())
1312

    
1313
    # Declare that we don't want to remove the instance lock anymore, as we've
1314
    # added the instance to the config
1315
    del self.remove_locks[locking.LEVEL_INSTANCE]
1316

    
1317
    if self.op.mode == constants.INSTANCE_IMPORT:
1318
      # Release unused nodes
1319
      ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node_uuid])
1320
    else:
1321
      # Release all nodes
1322
      ReleaseLocks(self, locking.LEVEL_NODE)
1323

    
1324
    disk_abort = False
1325
    if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
1326
      feedback_fn("* wiping instance disks...")
1327
      try:
1328
        WipeDisks(self, iobj)
1329
      except errors.OpExecError, err:
1330
        logging.exception("Wiping disks failed")
1331
        self.LogWarning("Wiping instance disks failed (%s)", err)
1332
        disk_abort = True
1333

    
1334
    if disk_abort:
1335
      # Something is already wrong with the disks, don't do anything else
1336
      pass
1337
    elif self.op.wait_for_sync:
1338
      disk_abort = not WaitForSync(self, iobj)
1339
    elif iobj.disk_template in constants.DTS_INT_MIRROR:
1340
      # make sure the disks are not degraded (still sync-ing is ok)
1341
      feedback_fn("* checking mirrors status")
1342
      disk_abort = not WaitForSync(self, iobj, oneshot=True)
1343
    else:
1344
      disk_abort = False
1345

    
1346
    if disk_abort:
1347
      RemoveDisks(self, iobj)
1348
      self.cfg.RemoveInstance(iobj.uuid)
1349
      # Make sure the instance lock gets removed
1350
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
1351
      raise errors.OpExecError("There are some degraded disks for"
1352
                               " this instance")
1353

    
1354
    # instance disks are now active
1355
    iobj.disks_active = True
1356

    
1357
    # Release all node resource locks
1358
    ReleaseLocks(self, locking.LEVEL_NODE_RES)
1359

    
1360
    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
1361
      if self.op.mode == constants.INSTANCE_CREATE:
1362
        if not self.op.no_install:
1363
          pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
1364
                        not self.op.wait_for_sync)
1365
          if pause_sync:
1366
            feedback_fn("* pausing disk sync to install instance OS")
1367
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
1368
                                                              (iobj.disks,
1369
                                                               iobj), True)
1370
            for idx, success in enumerate(result.payload):
1371
              if not success:
1372
                logging.warn("pause-sync of instance %s for disk %d failed",
1373
                             self.op.instance_name, idx)
1374

    
1375
          feedback_fn("* running the instance OS create scripts...")
1376
          # FIXME: pass debug option from opcode to backend
1377
          os_add_result = \
1378
            self.rpc.call_instance_os_add(self.pnode.uuid, (iobj, None), False,
1379
                                          self.op.debug_level)
1380
          if pause_sync:
1381
            feedback_fn("* resuming disk sync")
1382
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
1383
                                                              (iobj.disks,
1384
                                                               iobj), False)
1385
            for idx, success in enumerate(result.payload):
1386
              if not success:
1387
                logging.warn("resume-sync of instance %s for disk %d failed",
1388
                             self.op.instance_name, idx)
1389

    
1390
          os_add_result.Raise("Could not add os for instance %s"
1391
                              " on node %s" % (self.op.instance_name,
1392
                                               self.pnode.name))
1393

    
1394
      else:
1395
        if self.op.mode == constants.INSTANCE_IMPORT:
1396
          feedback_fn("* running the instance OS import scripts...")
1397

    
1398
          transfers = []
1399

    
1400
          for idx, image in enumerate(self.src_images):
1401
            if not image:
1402
              continue
1403

    
1404
            # FIXME: pass debug option from opcode to backend
1405
            dt = masterd.instance.DiskTransfer("disk/%s" % idx,
1406
                                               constants.IEIO_FILE, (image, ),
1407
                                               constants.IEIO_SCRIPT,
1408
                                               ((iobj.disks[idx], iobj), idx),
1409
                                               None)
1410
            transfers.append(dt)
1411

    
1412
          import_result = \
1413
            masterd.instance.TransferInstanceData(self, feedback_fn,
1414
                                                  self.op.src_node_uuid,
1415
                                                  self.pnode.uuid,
1416
                                                  self.pnode.secondary_ip,
1417
                                                  self.op.compress,
1418
                                                  iobj, transfers)
1419
          if not compat.all(import_result):
1420
            self.LogWarning("Some disks for instance %s on node %s were not"
1421
                            " imported successfully" % (self.op.instance_name,
1422
                                                        self.pnode.name))
1423

    
1424
          rename_from = self._old_instance_name
1425

    
1426
        elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
1427
          feedback_fn("* preparing remote import...")
1428
          # The source cluster will stop the instance before attempting to make
1429
          # a connection. In some cases stopping an instance can take a long
1430
          # time, hence the shutdown timeout is added to the connection
1431
          # timeout.
1432
          connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
1433
                             self.op.source_shutdown_timeout)
1434
          timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
1435

    
1436
          assert iobj.primary_node == self.pnode.uuid
1437
          disk_results = \
1438
            masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
1439
                                          self.source_x509_ca,
1440
                                          self._cds, self.op.compress, timeouts)
1441
          if not compat.all(disk_results):
1442
            # TODO: Should the instance still be started, even if some disks
1443
            # failed to import (valid for local imports, too)?
1444
            self.LogWarning("Some disks for instance %s on node %s were not"
1445
                            " imported successfully" % (self.op.instance_name,
1446
                                                        self.pnode.name))
1447

    
1448
          rename_from = self.source_instance_name
1449

    
1450
        else:
1451
          # also checked in the prereq part
1452
          raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
1453
                                       % self.op.mode)
1454

    
1455
        # Run rename script on newly imported instance
1456
        assert iobj.name == self.op.instance_name
1457
        feedback_fn("Running rename script for %s" % self.op.instance_name)
1458
        result = self.rpc.call_instance_run_rename(self.pnode.uuid, iobj,
1459
                                                   rename_from,
1460
                                                   self.op.debug_level)
1461
        result.Warn("Failed to run rename script for %s on node %s" %
1462
                    (self.op.instance_name, self.pnode.name), self.LogWarning)
1463

    
1464
    assert not self.owned_locks(locking.LEVEL_NODE_RES)
1465

    
1466
    if self.op.start:
1467
      iobj.admin_state = constants.ADMINST_UP
1468
      self.cfg.Update(iobj, feedback_fn)
1469
      logging.info("Starting instance %s on node %s", self.op.instance_name,
1470
                   self.pnode.name)
1471
      feedback_fn("* starting instance...")
1472
      result = self.rpc.call_instance_start(self.pnode.uuid, (iobj, None, None),
1473
                                            False, self.op.reason)
1474
      result.Raise("Could not start instance")
1475

    
1476
    return list(iobj.all_nodes)
1477

    
1478

    
1479
class LUInstanceRename(LogicalUnit):
1480
  """Rename an instance.
1481

1482
  """
1483
  HPATH = "instance-rename"
1484
  HTYPE = constants.HTYPE_INSTANCE
1485

    
1486
  def CheckArguments(self):
1487
    """Check arguments.
1488

1489
    """
1490
    if self.op.ip_check and not self.op.name_check:
1491
      # TODO: make the ip check more flexible and not depend on the name check
1492
      raise errors.OpPrereqError("IP address check requires a name check",
1493
                                 errors.ECODE_INVAL)
1494

    
1495
  def BuildHooksEnv(self):
1496
    """Build hooks env.
1497

1498
    This runs on master, primary and secondary nodes of the instance.
1499

1500
    """
1501
    env = BuildInstanceHookEnvByObject(self, self.instance)
1502
    env["INSTANCE_NEW_NAME"] = self.op.new_name
1503
    return env
1504

    
1505
  def BuildHooksNodes(self):
1506
    """Build hooks nodes.
1507

1508
    """
1509
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
1510
    return (nl, nl)
1511

    
1512
  def CheckPrereq(self):
1513
    """Check prerequisites.
1514

1515
    This checks that the instance is in the cluster and is not running.
1516

1517
    """
1518
    (self.op.instance_uuid, self.op.instance_name) = \
1519
      ExpandInstanceUuidAndName(self.cfg, self.op.instance_uuid,
1520
                                self.op.instance_name)
1521
    instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1522
    assert instance is not None
1523

    
1524
    # It should actually not happen that an instance is running with a disabled
1525
    # disk template, but in case it does, the renaming of file-based instances
1526
    # will fail horribly. Thus, we check this beforehand.
1527
    if (instance.disk_template in constants.DTS_FILEBASED and
1528
        self.op.new_name != instance.name):
1529
      CheckDiskTemplateEnabled(self.cfg.GetClusterInfo(),
1530
                               instance.disk_template)
1531

    
1532
    CheckNodeOnline(self, instance.primary_node)
1533
    CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
1534
                       msg="cannot rename")
1535
    self.instance = instance
1536

    
1537
    new_name = self.op.new_name
1538
    if self.op.name_check:
1539
      hostname = _CheckHostnameSane(self, new_name)
1540
      new_name = self.op.new_name = hostname.name
1541
      if (self.op.ip_check and
1542
          netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
1543
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
1544
                                   (hostname.ip, new_name),
1545
                                   errors.ECODE_NOTUNIQUE)
1546

    
1547
    instance_names = [inst.name for
1548
                      inst in self.cfg.GetAllInstancesInfo().values()]
1549
    if new_name in instance_names and new_name != instance.name:
1550
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
1551
                                 new_name, errors.ECODE_EXISTS)
1552

    
1553
  def Exec(self, feedback_fn):
1554
    """Rename the instance.
1555

1556
    """
1557
    old_name = self.instance.name
1558

    
1559
    rename_file_storage = False
1560
    if (self.instance.disk_template in (constants.DT_FILE,
1561
                                        constants.DT_SHARED_FILE) and
1562
        self.op.new_name != self.instance.name):
1563
      old_file_storage_dir = os.path.dirname(
1564
                               self.instance.disks[0].logical_id[1])
1565
      rename_file_storage = True
1566

    
1567
    self.cfg.RenameInstance(self.instance.uuid, self.op.new_name)
1568
    # Change the instance lock. This is definitely safe while we hold the BGL.
1569
    # Otherwise the new lock would have to be added in acquired mode.
1570
    assert self.REQ_BGL
1571
    assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER)
1572
    self.glm.remove(locking.LEVEL_INSTANCE, old_name)
1573
    self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
1574

    
1575
    # re-read the instance from the configuration after rename
1576
    renamed_inst = self.cfg.GetInstanceInfo(self.instance.uuid)
1577

    
1578
    if rename_file_storage:
1579
      new_file_storage_dir = os.path.dirname(
1580
                               renamed_inst.disks[0].logical_id[1])
1581
      result = self.rpc.call_file_storage_dir_rename(renamed_inst.primary_node,
1582
                                                     old_file_storage_dir,
1583
                                                     new_file_storage_dir)
1584
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
1585
                   " (but the instance has been renamed in Ganeti)" %
1586
                   (self.cfg.GetNodeName(renamed_inst.primary_node),
1587
                    old_file_storage_dir, new_file_storage_dir))
1588

    
1589
    StartInstanceDisks(self, renamed_inst, None)
1590
    # update info on disks
1591
    info = GetInstanceInfoText(renamed_inst)
1592
    for (idx, disk) in enumerate(renamed_inst.disks):
1593
      for node_uuid in renamed_inst.all_nodes:
1594
        result = self.rpc.call_blockdev_setinfo(node_uuid,
1595
                                                (disk, renamed_inst), info)
1596
        result.Warn("Error setting info on node %s for disk %s" %
1597
                    (self.cfg.GetNodeName(node_uuid), idx), self.LogWarning)
1598
    try:
1599
      result = self.rpc.call_instance_run_rename(renamed_inst.primary_node,
1600
                                                 renamed_inst, old_name,
1601
                                                 self.op.debug_level)
1602
      result.Warn("Could not run OS rename script for instance %s on node %s"
1603
                  " (but the instance has been renamed in Ganeti)" %
1604
                  (renamed_inst.name,
1605
                   self.cfg.GetNodeName(renamed_inst.primary_node)),
1606
                  self.LogWarning)
1607
    finally:
1608
      ShutdownInstanceDisks(self, renamed_inst)
1609

    
1610
    return renamed_inst.name
1611

    
1612

    
1613
class LUInstanceRemove(LogicalUnit):
1614
  """Remove an instance.
1615

1616
  """
1617
  HPATH = "instance-remove"
1618
  HTYPE = constants.HTYPE_INSTANCE
1619
  REQ_BGL = False
1620

    
1621
  def ExpandNames(self):
1622
    self._ExpandAndLockInstance()
1623
    self.needed_locks[locking.LEVEL_NODE] = []
1624
    self.needed_locks[locking.LEVEL_NODE_RES] = []
1625
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
1626

    
1627
  def DeclareLocks(self, level):
1628
    if level == locking.LEVEL_NODE:
1629
      self._LockInstancesNodes()
1630
    elif level == locking.LEVEL_NODE_RES:
1631
      # Copy node locks
1632
      self.needed_locks[locking.LEVEL_NODE_RES] = \
1633
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1634

    
1635
  def BuildHooksEnv(self):
1636
    """Build hooks env.
1637

1638
    This runs on master, primary and secondary nodes of the instance.
1639

1640
    """
1641
    env = BuildInstanceHookEnvByObject(self, self.instance)
1642
    env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
1643
    return env
1644

    
1645
  def BuildHooksNodes(self):
1646
    """Build hooks nodes.
1647

1648
    """
1649
    nl = [self.cfg.GetMasterNode()]
1650
    nl_post = list(self.instance.all_nodes) + nl
1651
    return (nl, nl_post)
1652

    
1653
  def CheckPrereq(self):
1654
    """Check prerequisites.
1655

1656
    This checks that the instance is in the cluster.
1657

1658
    """
1659
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1660
    assert self.instance is not None, \
1661
      "Cannot retrieve locked instance %s" % self.op.instance_name
1662

    
1663
  def Exec(self, feedback_fn):
1664
    """Remove the instance.
1665

1666
    """
1667
    logging.info("Shutting down instance %s on node %s", self.instance.name,
1668
                 self.cfg.GetNodeName(self.instance.primary_node))
1669

    
1670
    result = self.rpc.call_instance_shutdown(self.instance.primary_node,
1671
                                             self.instance,
1672
                                             self.op.shutdown_timeout,
1673
                                             self.op.reason)
1674
    if self.op.ignore_failures:
1675
      result.Warn("Warning: can't shutdown instance", feedback_fn)
1676
    else:
1677
      result.Raise("Could not shutdown instance %s on node %s" %
1678
                   (self.instance.name,
1679
                    self.cfg.GetNodeName(self.instance.primary_node)))
1680

    
1681
    assert (self.owned_locks(locking.LEVEL_NODE) ==
1682
            self.owned_locks(locking.LEVEL_NODE_RES))
1683
    assert not (set(self.instance.all_nodes) -
1684
                self.owned_locks(locking.LEVEL_NODE)), \
1685
      "Not owning correct locks"
1686

    
1687
    RemoveInstance(self, feedback_fn, self.instance, self.op.ignore_failures)
1688

    
1689

    
1690
class LUInstanceMove(LogicalUnit):
1691
  """Move an instance by data-copying.
1692

1693
  """
1694
  HPATH = "instance-move"
1695
  HTYPE = constants.HTYPE_INSTANCE
1696
  REQ_BGL = False
1697

    
1698
  def ExpandNames(self):
1699
    self._ExpandAndLockInstance()
1700
    (self.op.target_node_uuid, self.op.target_node) = \
1701
      ExpandNodeUuidAndName(self.cfg, self.op.target_node_uuid,
1702
                            self.op.target_node)
1703
    self.needed_locks[locking.LEVEL_NODE] = [self.op.target_node_uuid]
1704
    self.needed_locks[locking.LEVEL_NODE_RES] = []
1705
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
1706

    
1707
  def DeclareLocks(self, level):
1708
    if level == locking.LEVEL_NODE:
1709
      self._LockInstancesNodes(primary_only=True)
1710
    elif level == locking.LEVEL_NODE_RES:
1711
      # Copy node locks
1712
      self.needed_locks[locking.LEVEL_NODE_RES] = \
1713
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1714

    
1715
  def BuildHooksEnv(self):
1716
    """Build hooks env.
1717

1718
    This runs on master, primary and target nodes of the instance.
1719

1720
    """
1721
    env = {
1722
      "TARGET_NODE": self.op.target_node,
1723
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
1724
      }
1725
    env.update(BuildInstanceHookEnvByObject(self, self.instance))
1726
    return env
1727

    
1728
  def BuildHooksNodes(self):
1729
    """Build hooks nodes.
1730

1731
    """
1732
    nl = [
1733
      self.cfg.GetMasterNode(),
1734
      self.instance.primary_node,
1735
      self.op.target_node_uuid,
1736
      ]
1737
    return (nl, nl)
1738

    
1739
  def CheckPrereq(self):
1740
    """Check prerequisites.
1741

1742
    This checks that the instance is in the cluster.
1743

1744
    """
1745
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1746
    assert self.instance is not None, \
1747
      "Cannot retrieve locked instance %s" % self.op.instance_name
1748

    
1749
    if self.instance.disk_template not in constants.DTS_COPYABLE:
1750
      raise errors.OpPrereqError("Disk template %s not suitable for copying" %
1751
                                 self.instance.disk_template,
1752
                                 errors.ECODE_STATE)
1753

    
1754
    target_node = self.cfg.GetNodeInfo(self.op.target_node_uuid)
1755
    assert target_node is not None, \
1756
      "Cannot retrieve locked node %s" % self.op.target_node
1757

    
1758
    self.target_node_uuid = target_node.uuid
1759
    if target_node.uuid == self.instance.primary_node:
1760
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
1761
                                 (self.instance.name, target_node.name),
1762
                                 errors.ECODE_STATE)
1763

    
1764
    cluster = self.cfg.GetClusterInfo()
1765
    bep = cluster.FillBE(self.instance)
1766

    
1767
    for idx, dsk in enumerate(self.instance.disks):
1768
      if dsk.dev_type not in (constants.DT_PLAIN, constants.DT_FILE,
1769
                              constants.DT_SHARED_FILE, constants.DT_GLUSTER):
1770
        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
1771
                                   " cannot copy" % idx, errors.ECODE_STATE)
1772

    
1773
    CheckNodeOnline(self, target_node.uuid)
1774
    CheckNodeNotDrained(self, target_node.uuid)
1775
    CheckNodeVmCapable(self, target_node.uuid)
1776
    group_info = self.cfg.GetNodeGroup(target_node.group)
1777
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
1778
    CheckTargetNodeIPolicy(self, ipolicy, self.instance, target_node, self.cfg,
1779
                           ignore=self.op.ignore_ipolicy)
1780

    
1781
    if self.instance.admin_state == constants.ADMINST_UP:
1782
      # check memory requirements on the target node
1783
      CheckNodeFreeMemory(
1784
          self, target_node.uuid, "moving instance %s" %
1785
          self.instance.name, bep[constants.BE_MAXMEM],
1786
          self.instance.hypervisor,
1787
          cluster.hvparams[self.instance.hypervisor])
1788
    else:
1789
      self.LogInfo("Not checking memory on the target node as"
1790
                   " instance will not be started")
1791

    
1792
    # check bridge existence
1793
    CheckInstanceBridgesExist(self, self.instance, node_uuid=target_node.uuid)
1794

    
1795
  def Exec(self, feedback_fn):
1796
    """Move an instance.
1797

1798
    The move is done by shutting it down on its present node, copying
1799
    the data over (slow) and starting it on the new node.
1800

1801
    """
1802
    source_node = self.cfg.GetNodeInfo(self.instance.primary_node)
1803
    target_node = self.cfg.GetNodeInfo(self.target_node_uuid)
1804

    
1805
    self.LogInfo("Shutting down instance %s on source node %s",
1806
                 self.instance.name, source_node.name)
1807

    
1808
    assert (self.owned_locks(locking.LEVEL_NODE) ==
1809
            self.owned_locks(locking.LEVEL_NODE_RES))
1810

    
1811
    result = self.rpc.call_instance_shutdown(source_node.uuid, self.instance,
1812
                                             self.op.shutdown_timeout,
1813
                                             self.op.reason)
1814
    if self.op.ignore_consistency:
1815
      result.Warn("Could not shutdown instance %s on node %s. Proceeding"
1816
                  " anyway. Please make sure node %s is down. Error details" %
1817
                  (self.instance.name, source_node.name, source_node.name),
1818
                  self.LogWarning)
1819
    else:
1820
      result.Raise("Could not shutdown instance %s on node %s" %
1821
                   (self.instance.name, source_node.name))
1822

    
1823
    # create the target disks
1824
    try:
1825
      CreateDisks(self, self.instance, target_node_uuid=target_node.uuid)
1826
    except errors.OpExecError:
1827
      self.LogWarning("Device creation failed")
1828
      self.cfg.ReleaseDRBDMinors(self.instance.uuid)
1829
      raise
1830

    
1831
    errs = []
1832
    transfers = []
1833
    # activate, get path, create transfer jobs
1834
    for idx, disk in enumerate(self.instance.disks):
1835
      # FIXME: pass debug option from opcode to backend
1836
      dt = masterd.instance.DiskTransfer("disk/%s" % idx,
1837
                                         constants.IEIO_RAW_DISK,
1838
                                         (disk, self.instance),
1839
                                         constants.IEIO_RAW_DISK,
1840
                                         (disk, self.instance),
1841
                                         None)
1842
      transfers.append(dt)
1843

    
1844
    import_result = \
1845
      masterd.instance.TransferInstanceData(self, feedback_fn,
1846
                                            source_node.uuid,
1847
                                            target_node.uuid,
1848
                                            target_node.secondary_ip,
1849
                                            self.op.compress,
1850
                                            self.instance, transfers)
1851
    if not compat.all(import_result):
1852
      errs.append("Failed to transfer instance data")
1853

    
1854
    if errs:
1855
      self.LogWarning("Some disks failed to copy, aborting")
1856
      try:
1857
        RemoveDisks(self, self.instance, target_node_uuid=target_node.uuid)
1858
      finally:
1859
        self.cfg.ReleaseDRBDMinors(self.instance.uuid)
1860
        raise errors.OpExecError("Errors during disk copy: %s" %
1861
                                 (",".join(errs),))
1862

    
1863
    self.instance.primary_node = target_node.uuid
1864
    self.cfg.Update(self.instance, feedback_fn)
1865

    
1866
    self.LogInfo("Removing the disks on the original node")
1867
    RemoveDisks(self, self.instance, target_node_uuid=source_node.uuid)
1868

    
1869
    # Only start the instance if it's marked as up
1870
    if self.instance.admin_state == constants.ADMINST_UP:
1871
      self.LogInfo("Starting instance %s on node %s",
1872
                   self.instance.name, target_node.name)
1873

    
1874
      disks_ok, _ = AssembleInstanceDisks(self, self.instance,
1875
                                          ignore_secondaries=True)
1876
      if not disks_ok:
1877
        ShutdownInstanceDisks(self, self.instance)
1878
        raise errors.OpExecError("Can't activate the instance's disks")
1879

    
1880
      result = self.rpc.call_instance_start(target_node.uuid,
1881
                                            (self.instance, None, None), False,
1882
                                            self.op.reason)
1883
      msg = result.fail_msg
1884
      if msg:
1885
        ShutdownInstanceDisks(self, self.instance)
1886
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
1887
                                 (self.instance.name, target_node.name, msg))
1888

    
1889

    
1890
class LUInstanceMultiAlloc(NoHooksLU):
1891
  """Allocates multiple instances at the same time.
1892

1893
  """
1894
  REQ_BGL = False
1895

    
1896
  def CheckArguments(self):
1897
    """Check arguments.
1898

1899
    """
1900
    nodes = []
1901
    for inst in self.op.instances:
1902
      if inst.iallocator is not None:
1903
        raise errors.OpPrereqError("iallocator must not be set on"
1904
                                   " instance objects", errors.ECODE_INVAL)
1905
      nodes.append(bool(inst.pnode))
1906
      if inst.disk_template in constants.DTS_INT_MIRROR:
1907
        nodes.append(bool(inst.snode))
1908

    
1909
    has_nodes = compat.any(nodes)
1910
    if compat.all(nodes) ^ has_nodes:
1911
      raise errors.OpPrereqError("There are instance objects providing"
1912
                                 " pnode/snode while others do not",
1913
                                 errors.ECODE_INVAL)
1914

    
1915
    if not has_nodes and self.op.iallocator is None:
1916
      default_iallocator = self.cfg.GetDefaultIAllocator()
1917
      if default_iallocator:
1918
        self.op.iallocator = default_iallocator
1919
      else:
1920
        raise errors.OpPrereqError("No iallocator or nodes on the instances"
1921
                                   " given and no cluster-wide default"
1922
                                   " iallocator found; please specify either"
1923
                                   " an iallocator or nodes on the instances"
1924
                                   " or set a cluster-wide default iallocator",
1925
                                   errors.ECODE_INVAL)
1926

    
1927
    _CheckOpportunisticLocking(self.op)
1928

    
1929
    dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
1930
    if dups:
1931
      raise errors.OpPrereqError("There are duplicate instance names: %s" %
1932
                                 utils.CommaJoin(dups), errors.ECODE_INVAL)
1933

    
1934
  def ExpandNames(self):
1935
    """Calculate the locks.
1936

1937
    """
1938
    self.share_locks = ShareAll()
1939
    self.needed_locks = {
1940
      # iallocator will select nodes and even if no iallocator is used,
1941
      # collisions with LUInstanceCreate should be avoided
1942
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
1943
      }
1944

    
1945
    if self.op.iallocator:
1946
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
1947
      self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET
1948

    
1949
      if self.op.opportunistic_locking:
1950
        self.opportunistic_locks[locking.LEVEL_NODE] = True
1951
        self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
1952
    else:
1953
      nodeslist = []
1954
      for inst in self.op.instances:
1955
        (inst.pnode_uuid, inst.pnode) = \
1956
          ExpandNodeUuidAndName(self.cfg, inst.pnode_uuid, inst.pnode)
1957
        nodeslist.append(inst.pnode_uuid)
1958
        if inst.snode is not None:
1959
          (inst.snode_uuid, inst.snode) = \
1960
            ExpandNodeUuidAndName(self.cfg, inst.snode_uuid, inst.snode)
1961
          nodeslist.append(inst.snode_uuid)
1962

    
1963
      self.needed_locks[locking.LEVEL_NODE] = nodeslist
1964
      # Lock resources of instance's primary and secondary nodes (copy to
1965
      # prevent accidental modification)
1966
      self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)
1967

    
1968
  def CheckPrereq(self):
1969
    """Check prerequisite.
1970

1971
    """
1972
    if self.op.iallocator:
1973
      cluster = self.cfg.GetClusterInfo()
1974
      default_vg = self.cfg.GetVGName()
1975
      ec_id = self.proc.GetECId()
1976

    
1977
      if self.op.opportunistic_locking:
1978
        # Only consider nodes for which a lock is held
1979
        node_whitelist = self.cfg.GetNodeNames(
1980
                           list(self.owned_locks(locking.LEVEL_NODE)))
1981
      else:
1982
        node_whitelist = None
1983

    
1984
      insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
1985
                                           _ComputeNics(op, cluster, None,
1986
                                                        self.cfg, ec_id),
1987
                                           _ComputeFullBeParams(op, cluster),
1988
                                           node_whitelist)
1989
               for op in self.op.instances]
1990

    
1991
      req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
1992
      ial = iallocator.IAllocator(self.cfg, self.rpc, req)
1993

    
1994
      ial.Run(self.op.iallocator)
1995

    
1996
      if not ial.success:
1997
        raise errors.OpPrereqError("Can't compute nodes using"
1998
                                   " iallocator '%s': %s" %
1999
                                   (self.op.iallocator, ial.info),
2000
                                   errors.ECODE_NORES)
2001

    
2002
      self.ia_result = ial.result
2003

    
2004
    if self.op.dry_run:
2005
      self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
2006
        constants.JOB_IDS_KEY: [],
2007
        })
2008

    
2009
  def _ConstructPartialResult(self):
2010
    """Constructs the partial result.
2011

2012
    """
2013
    if self.op.iallocator:
2014
      (allocatable, failed_insts) = self.ia_result
2015
      allocatable_insts = map(compat.fst, allocatable)
2016
    else:
2017
      allocatable_insts = [op.instance_name for op in self.op.instances]
2018
      failed_insts = []
2019

    
2020
    return {
2021
      constants.ALLOCATABLE_KEY: allocatable_insts,
2022
      constants.FAILED_KEY: failed_insts,
2023
      }
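    # Illustrative shape of the result (hypothetical instance names):
    #   {constants.ALLOCATABLE_KEY: ["inst1.example.com"],
    #    constants.FAILED_KEY: ["inst2.example.com"]}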
2024

    
2025
  def Exec(self, feedback_fn):
2026
    """Executes the opcode.
2027

2028
    """
2029
    jobs = []
2030
    if self.op.iallocator:
2031
      op2inst = dict((op.instance_name, op) for op in self.op.instances)
2032
      (allocatable, failed) = self.ia_result
2033

    
2034
      for (name, node_names) in allocatable:
2035
        op = op2inst.pop(name)
2036

    
2037
        (op.pnode_uuid, op.pnode) = \
2038
          ExpandNodeUuidAndName(self.cfg, None, node_names[0])
2039
        if len(node_names) > 1:
2040
          (op.snode_uuid, op.snode) = \
2041
            ExpandNodeUuidAndName(self.cfg, None, node_names[1])
2042

    
2043
        jobs.append([op])
2044

    
2045
      missing = set(op2inst.keys()) - set(failed)
2046
      assert not missing, \
2047
        "Iallocator returned an incomplete result: %s" % \
2048
        utils.CommaJoin(missing)
2049
    else:
2050
      jobs.extend([op] for op in self.op.instances)
2051

    
2052
    return ResultWithJobs(jobs, **self._ConstructPartialResult())
2053

    
2054

    
2055
class _InstNicModPrivate:
2056
  """Data structure for network interface modifications.
2057

2058
  Used by L{LUInstanceSetParams}.
2059

2060
  """
2061
  def __init__(self):
2062
    self.params = None
2063
    self.filled = None
2064

    
2065

    
2066
def _PrepareContainerMods(mods, private_fn):
2067
  """Prepares a list of container modifications by adding a private data field.
2068

2069
  @type mods: list of tuples; (operation, index, parameters)
2070
  @param mods: List of modifications
2071
  @type private_fn: callable or None
2072
  @param private_fn: Callable for constructing a private data field for a
2073
    modification
2074
  @rtype: list
2075

2076
  """
2077
  if private_fn is None:
2078
    fn = lambda: None
2079
  else:
2080
    fn = private_fn
2081

    
2082
  return [(op, idx, params, fn()) for (op, idx, params) in mods]
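# Illustrative example (hypothetical values): with private_fn=None, the
# modification list [(constants.DDM_ADD, -1, {"size": 1024})] becomes
# [(constants.DDM_ADD, -1, {"size": 1024}, None)]; LUInstanceSetParams passes
# a private_fn for NIC modifications so that every entry carries its own
# private data object.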
2083

    
2084

    
2085
def _CheckNodesPhysicalCPUs(lu, node_uuids, requested, hypervisor_specs):
2086
  """Checks if nodes have enough physical CPUs
2087

2088
  This function checks if all given nodes have the needed number of
2089
  physical CPUs. In case any node has less CPUs or we cannot get the
2090
  information from the node, this function raises an OpPrereqError
2091
  exception.
2092

2093
  @type lu: C{LogicalUnit}
2094
  @param lu: a logical unit from which we get configuration data
2095
  @type node_uuids: C{list}
2096
  @param node_uuids: the list of node UUIDs to check
2097
  @type requested: C{int}
2098
  @param requested: the minimum acceptable number of physical CPUs
2099
  @type hypervisor_specs: list of pairs (string, dict of strings)
2100
  @param hypervisor_specs: list of hypervisor specifications in
2101
      pairs (hypervisor_name, hvparams)
2102
  @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
2103
      or we cannot check the node
2104

2105
  """
2106
  nodeinfo = lu.rpc.call_node_info(node_uuids, None, hypervisor_specs)
2107
  for node_uuid in node_uuids:
2108
    info = nodeinfo[node_uuid]
2109
    node_name = lu.cfg.GetNodeName(node_uuid)
2110
    info.Raise("Cannot get current information from node %s" % node_name,
2111
               prereq=True, ecode=errors.ECODE_ENVIRON)
2112
    (_, _, (hv_info, )) = info.payload
2113
    num_cpus = hv_info.get("cpu_total", None)
2114
    if not isinstance(num_cpus, int):
2115
      raise errors.OpPrereqError("Can't compute the number of physical CPUs"
2116
                                 " on node %s, result was '%s'" %
2117
                                 (node_name, num_cpus), errors.ECODE_ENVIRON)
2118
    if requested > num_cpus:
2119
      raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
2120
                                 "required" % (node_name, num_cpus, requested),
2121
                                 errors.ECODE_NORES)
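# Illustrative note (hypothetical values): hypervisor_specs is typically a
# single-element list such as
#   [(constants.HT_KVM, cluster.hvparams[constants.HT_KVM])]
# which is why the node_info payload above is unpacked as "(hv_info, )".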
2122

    
2123

    
2124
def GetItemFromContainer(identifier, kind, container):
2125
  """Return the item referred to by the identifier.
2126

2127
  @type identifier: string
2128
  @param identifier: Item index or name or UUID
2129
  @type kind: string
2130
  @param kind: One-word item description
2131
  @type container: list
2132
  @param container: Container to get the item from
2133

2134
  """
2135
  # Index
2136
  try:
2137
    idx = int(identifier)
2138
    if idx == -1:
2139
      # Append
2140
      absidx = len(container) - 1
2141
    elif idx < 0:
2142
      raise IndexError("Not accepting negative indices other than -1")
2143
    elif idx > len(container):
2144
      raise IndexError("Got %s index %s, but there are only %s" %
2145
                       (kind, idx, len(container)))
2146
    else:
2147
      absidx = idx
2148
    return (absidx, container[idx])
2149
  except ValueError:
2150
    pass
2151

    
2152
  for idx, item in enumerate(container):
2153
    if item.uuid == identifier or item.name == identifier:
2154
      return (idx, item)
2155

    
2156
  raise errors.OpPrereqError("Cannot find %s with identifier %s" %
2157
                             (kind, identifier), errors.ECODE_NOENT)
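# Illustrative usage (hypothetical values): for a container of three NICs,
#   GetItemFromContainer("1", "nic", nics)  -> (1, nics[1])
#   GetItemFromContainer("-1", "nic", nics) -> (2, nics[-1])
# while a NIC name or UUID falls through to the search loop above.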
2158

    
2159

    
2160
def _ApplyContainerMods(kind, container, chgdesc, mods,
2161
                        create_fn, modify_fn, remove_fn,
2162
                        post_add_fn=None):
2163
  """Applies descriptions in C{mods} to C{container}.
2164

2165
  @type kind: string
2166
  @param kind: One-word item description
2167
  @type container: list
2168
  @param container: Container to modify
2169
  @type chgdesc: None or list
2170
  @param chgdesc: List of applied changes
2171
  @type mods: list
2172
  @param mods: Modifications as returned by L{_PrepareContainerMods}
2173
  @type create_fn: callable
2174
  @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
2175
    receives absolute item index, parameters and private data object as added
2176
    by L{_PrepareContainerMods}, returns tuple containing new item and changes
2177
    as list
2178
  @type modify_fn: callable
2179
  @param modify_fn: Callback for modifying an existing item
2180
    (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
2181
    and private data object as added by L{_PrepareContainerMods}, returns
2182
    changes as list
2183
  @type remove_fn: callable
2184
  @param remove_fn: Callback on removing item; receives absolute item index,
2185
    item and private data object as added by L{_PrepareContainerMods}
2186
  @type post_add_fn: callable
2187
  @param post_add_fn: Callable for post-processing a newly created item after
2188
    it has been put into the container. It receives the index of the new item
2189
    and the new item as parameters.
2190

2191
  """
2192
  for (op, identifier, params, private) in mods:
2193
    changes = None
2194

    
2195
    if op == constants.DDM_ADD:
2196
      # Calculate where item will be added
2197
      # When adding an item, identifier can only be an index
2198
      try:
2199
        idx = int(identifier)
2200
      except ValueError:
2201
        raise errors.OpPrereqError("Only a positive integer or -1 is accepted"
2202
                                   " as identifier for %s" % constants.DDM_ADD,
2203
                                   errors.ECODE_INVAL)
2204
      if idx == -1:
2205
        addidx = len(container)
2206
      else:
2207
        if idx < 0:
2208
          raise IndexError("Not accepting negative indices other than -1")
2209
        elif idx > len(container):
2210
          raise IndexError("Got %s index %s, but there are only %s" %
2211
                           (kind, idx, len(container)))
2212
        addidx = idx
2213

    
2214
      if create_fn is None:
2215
        item = params
2216
      else:
2217
        (item, changes) = create_fn(addidx, params, private)
2218

    
2219
      if idx == -1:
2220
        container.append(item)
2221
      else:
2222
        assert idx >= 0
2223
        assert idx <= len(container)
2224
        # list.insert does so before the specified index
2225
        container.insert(idx, item)
2226

    
2227
      if post_add_fn is not None:
2228
        post_add_fn(addidx, item)
2229

    
2230
    else:
2231
      # Retrieve existing item
2232
      (absidx, item) = GetItemFromContainer(identifier, kind, container)
2233

    
2234
      if op == constants.DDM_REMOVE:
2235
        assert not params
2236

    
2237
        changes = [("%s/%s" % (kind, absidx), "remove")]
2238

    
2239
        if remove_fn is not None:
2240
          msg = remove_fn(absidx, item, private)
2241
          if msg:
2242
            changes.append(("%s/%s" % (kind, absidx), msg))
2243

    
2244
        assert container[absidx] == item
2245
        del container[absidx]
2246
      elif op == constants.DDM_MODIFY:
2247
        if modify_fn is not None:
2248
          changes = modify_fn(absidx, item, params, private)
2249
      else:
2250
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2251

    
2252
    assert _TApplyContModsCbChanges(changes)
2253

    
2254
    if not (chgdesc is None or changes is None):
2255
      chgdesc.extend(changes)
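# Illustrative walk-through (hypothetical values): a prepared modification
# such as (constants.DDM_REMOVE, "2", {}, None) looks up item 2, records the
# change ("<kind>/2", "remove") plus any message returned by remove_fn, and
# deletes the item; for DDM_ADD and DDM_MODIFY the recorded changes are
# whatever create_fn/modify_fn return.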
2256

    
2257

    
2258
def _UpdateIvNames(base_index, disks):
2259
  """Updates the C{iv_name} attribute of disks.
2260

2261
  @type disks: list of L{objects.Disk}
2262

2263
  """
2264
  for (idx, disk) in enumerate(disks):
2265
    disk.iv_name = "disk/%s" % (base_index + idx, )
2266

    
2267

    
2268
class LUInstanceSetParams(LogicalUnit):
2269
  """Modifies an instance's parameters.
2270

2271
  """
2272
  HPATH = "instance-modify"
2273
  HTYPE = constants.HTYPE_INSTANCE
2274
  REQ_BGL = False
2275

    
2276
  @staticmethod
2277
  def _UpgradeDiskNicMods(kind, mods, verify_fn):
2278
    assert ht.TList(mods)
2279
    assert not mods or len(mods[0]) in (2, 3)
2280

    
2281
    if mods and len(mods[0]) == 2:
2282
      result = []
2283

    
2284
      addremove = 0
2285
      for op, params in mods:
2286
        if op in (constants.DDM_ADD, constants.DDM_REMOVE):
2287
          result.append((op, -1, params))
2288
          addremove += 1
2289

    
2290
          if addremove > 1:
2291
            raise errors.OpPrereqError("Only one %s add or remove operation is"
2292
                                       " supported at a time" % kind,
2293
                                       errors.ECODE_INVAL)
2294
        else:
2295
          result.append((constants.DDM_MODIFY, op, params))
2296

    
2297
      assert verify_fn(result)
2298
    else:
2299
      result = mods
2300

    
2301
    return result
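    # Illustrative example (hypothetical values): the legacy two-element
    # format [(constants.DDM_ADD, {"size": 1024})] is upgraded to
    # [(constants.DDM_ADD, -1, {"size": 1024})], while an entry such as
    # (0, {"mode": "ro"}) becomes (constants.DDM_MODIFY, 0, {"mode": "ro"}).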
2302

    
2303
  @staticmethod
2304
  def _CheckMods(kind, mods, key_types, item_fn):
2305
    """Ensures requested disk/NIC modifications are valid.
2306

2307
    """
2308
    for (op, _, params) in mods:
2309
      assert ht.TDict(params)
2310

    
2311
      # If 'key_types' is an empty dict, we assume we have an
2312
      # 'ext' template and thus do not ForceDictType
2313
      if key_types:
2314
        utils.ForceDictType(params, key_types)
2315

    
2316
      if op == constants.DDM_REMOVE:
2317
        if params:
2318
          raise errors.OpPrereqError("No settings should be passed when"
2319
                                     " removing a %s" % kind,
2320
                                     errors.ECODE_INVAL)
2321
      elif op in (constants.DDM_ADD, constants.DDM_MODIFY):
2322
        item_fn(op, params)
2323
      else:
2324
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2325

    
2326
  @staticmethod
2327
  def _VerifyDiskModification(op, params, excl_stor):
2328
    """Verifies a disk modification.
2329

2330
    """
2331
    if op == constants.DDM_ADD:
2332
      mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
2333
      if mode not in constants.DISK_ACCESS_SET:
2334
        raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
2335
                                   errors.ECODE_INVAL)
2336

    
2337
      size = params.get(constants.IDISK_SIZE, None)
2338
      if size is None:
2339
        raise errors.OpPrereqError("Required disk parameter '%s' missing" %
2340
                                   constants.IDISK_SIZE, errors.ECODE_INVAL)
2341
      size = int(size)
2342

    
2343
      params[constants.IDISK_SIZE] = size
2344
      name = params.get(constants.IDISK_NAME, None)
2345
      if name is not None and name.lower() == constants.VALUE_NONE:
2346
        params[constants.IDISK_NAME] = None
2347

    
2348
      CheckSpindlesExclusiveStorage(params, excl_stor, True)
2349

    
2350
    elif op == constants.DDM_MODIFY:
2351
      if constants.IDISK_SIZE in params:
2352
        raise errors.OpPrereqError("Disk size change not possible, use"
2353
                                   " grow-disk", errors.ECODE_INVAL)
2354
      if len(params) > 2:
2355
        raise errors.OpPrereqError("Disk modification doesn't support"
2356
                                   " additional arbitrary parameters",
2357
                                   errors.ECODE_INVAL)
2358
      name = params.get(constants.IDISK_NAME, None)
2359
      if name is not None and name.lower() == constants.VALUE_NONE:
2360
        params[constants.IDISK_NAME] = None
2361

    
2362
  @staticmethod
2363
  def _VerifyNicModification(op, params):
2364
    """Verifies a network interface modification.
2365

2366
    """
2367
    if op in (constants.DDM_ADD, constants.DDM_MODIFY):
2368
      ip = params.get(constants.INIC_IP, None)
2369
      name = params.get(constants.INIC_NAME, None)
2370
      req_net = params.get(constants.INIC_NETWORK, None)
2371
      link = params.get(constants.NIC_LINK, None)
2372
      mode = params.get(constants.NIC_MODE, None)
2373
      if name is not None and name.lower() == constants.VALUE_NONE:
2374
        params[constants.INIC_NAME] = None
2375
      if req_net is not None:
2376
        if req_net.lower() == constants.VALUE_NONE:
2377
          params[constants.INIC_NETWORK] = None
2378
          req_net = None
2379
        elif link is not None or mode is not None:
2380
          raise errors.OpPrereqError("If a network is given, mode or link"
2381
                                     " should not be set",
2382
                                     errors.ECODE_INVAL)
2383

    
2384
      if op == constants.DDM_ADD:
2385
        macaddr = params.get(constants.INIC_MAC, None)
2386
        if macaddr is None:
2387
          params[constants.INIC_MAC] = constants.VALUE_AUTO
2388

    
2389
      if ip is not None:
2390
        if ip.lower() == constants.VALUE_NONE:
2391
          params[constants.INIC_IP] = None
2392
        else:
2393
          if ip.lower() == constants.NIC_IP_POOL:
2394
            if op == constants.DDM_ADD and req_net is None:
2395
              raise errors.OpPrereqError("If ip=pool, parameter network"
2396
                                         " cannot be none",
2397
                                         errors.ECODE_INVAL)
2398
          else:
2399
            if not netutils.IPAddress.IsValid(ip):
2400
              raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
2401
                                         errors.ECODE_INVAL)
2402

    
2403
      if constants.INIC_MAC in params:
2404
        macaddr = params[constants.INIC_MAC]
2405
        if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2406
          macaddr = utils.NormalizeAndValidateMac(macaddr)
2407

    
2408
        if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
2409
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
2410
                                     " modifying an existing NIC",
2411
                                     errors.ECODE_INVAL)
2412

    
2413
  def CheckArguments(self):
2414
    if not (self.op.nics or self.op.disks or self.op.disk_template or
2415
            self.op.hvparams or self.op.beparams or self.op.os_name or
2416
            self.op.osparams or self.op.offline is not None or
2417
            self.op.runtime_mem or self.op.pnode):
2418
      raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
2419

    
2420
    if self.op.hvparams:
2421
      CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
2422
                           "hypervisor", "instance", "cluster")
2423

    
2424
    self.op.disks = self._UpgradeDiskNicMods(
2425
      "disk", self.op.disks, ht.TSetParamsMods(ht.TIDiskParams))
2426
    self.op.nics = self._UpgradeDiskNicMods(
2427
      "NIC", self.op.nics, ht.TSetParamsMods(ht.TINicParams))
2428

    
2429
    if self.op.disks and self.op.disk_template is not None:
2430
      raise errors.OpPrereqError("Disk template conversion and other disk"
2431
                                 " changes not supported at the same time",
2432
                                 errors.ECODE_INVAL)
2433

    
2434
    if (self.op.disk_template and
2435
        self.op.disk_template in constants.DTS_INT_MIRROR and
2436
        self.op.remote_node is None):
2437
      raise errors.OpPrereqError("Changing the disk template to a mirrored"
2438
                                 " one requires specifying a secondary node",
2439
                                 errors.ECODE_INVAL)
2440

    
2441
    # Check NIC modifications
2442
    self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
2443
                    self._VerifyNicModification)
2444

    
2445
    if self.op.pnode:
2446
      (self.op.pnode_uuid, self.op.pnode) = \
2447
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
2448

    
2449
  def ExpandNames(self):
2450
    self._ExpandAndLockInstance()
2451
    self.needed_locks[locking.LEVEL_NODEGROUP] = []
2452
    # Can't even acquire node locks in shared mode as upcoming changes in
2453
    # Ganeti 2.6 will start to modify the node object on disk conversion
2454
    self.needed_locks[locking.LEVEL_NODE] = []
2455
    self.needed_locks[locking.LEVEL_NODE_RES] = []
2456
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2457
    # Lock the node group to look up the ipolicy
2458
    self.share_locks[locking.LEVEL_NODEGROUP] = 1
2459

    
2460
  def DeclareLocks(self, level):
2461
    if level == locking.LEVEL_NODEGROUP:
2462
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
2463
      # Acquire locks for the instance's nodegroups optimistically. Needs
2464
      # to be verified in CheckPrereq
2465
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
2466
        self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
2467
    elif level == locking.LEVEL_NODE:
2468
      self._LockInstancesNodes()
2469
      if self.op.disk_template and self.op.remote_node:
2470
        (self.op.remote_node_uuid, self.op.remote_node) = \
2471
          ExpandNodeUuidAndName(self.cfg, self.op.remote_node_uuid,
2472
                                self.op.remote_node)
2473
        self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node_uuid)
2474
    elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
2475
      # Copy node locks
2476
      self.needed_locks[locking.LEVEL_NODE_RES] = \
2477
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
2478

    
2479
  def BuildHooksEnv(self):
2480
    """Build hooks env.
2481

2482
    This runs on the master, primary and secondaries.
2483

2484
    """
2485
    args = {}
2486
    if constants.BE_MINMEM in self.be_new:
2487
      args["minmem"] = self.be_new[constants.BE_MINMEM]
2488
    if constants.BE_MAXMEM in self.be_new:
2489
      args["maxmem"] = self.be_new[constants.BE_MAXMEM]
2490
    if constants.BE_VCPUS in self.be_new:
2491
      args["vcpus"] = self.be_new[constants.BE_VCPUS]
2492
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
2493
    # information at all.
2494

    
2495
    if self._new_nics is not None:
2496
      nics = []
2497

    
2498
      for nic in self._new_nics:
2499
        n = copy.deepcopy(nic)
2500
        nicparams = self.cluster.SimpleFillNIC(n.nicparams)
2501
        n.nicparams = nicparams
2502
        nics.append(NICToTuple(self, n))
2503

    
2504
      args["nics"] = nics
2505

    
2506
    env = BuildInstanceHookEnvByObject(self, self.instance, override=args)
2507
    if self.op.disk_template:
2508
      env["NEW_DISK_TEMPLATE"] = self.op.disk_template
2509
    if self.op.runtime_mem:
2510
      env["RUNTIME_MEMORY"] = self.op.runtime_mem
2511

    
2512
    return env
2513

    
2514
  def BuildHooksNodes(self):
2515
    """Build hooks nodes.
2516

2517
    """
2518
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
2519
    return (nl, nl)
2520

    
2521
  def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
2522
                              old_params, cluster, pnode_uuid):

    update_params_dict = dict([(key, params[key])
                               for key in constants.NICS_PARAMETERS
                               if key in params])

    req_link = update_params_dict.get(constants.NIC_LINK, None)
    req_mode = update_params_dict.get(constants.NIC_MODE, None)

    new_net_uuid = None
    new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid)
    if new_net_uuid_or_name:
      new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name)
      new_net_obj = self.cfg.GetNetwork(new_net_uuid)

    if old_net_uuid:
      old_net_obj = self.cfg.GetNetwork(old_net_uuid)

    if new_net_uuid:
      netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode_uuid)
      if not netparams:
        raise errors.OpPrereqError("No netparams found for the network"
                                   " %s, probably not connected" %
                                   new_net_obj.name, errors.ECODE_INVAL)
      new_params = dict(netparams)
    else:
      new_params = GetUpdatedParams(old_params, update_params_dict)

    utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)

    new_filled_params = cluster.SimpleFillNIC(new_params)
    objects.NIC.CheckParameterSyntax(new_filled_params)

    new_mode = new_filled_params[constants.NIC_MODE]
    if new_mode == constants.NIC_MODE_BRIDGED:
      bridge = new_filled_params[constants.NIC_LINK]
      msg = self.rpc.call_bridges_exist(pnode_uuid, [bridge]).fail_msg
      if msg:
        msg = "Error checking bridges on node '%s': %s" % \
                (self.cfg.GetNodeName(pnode_uuid), msg)
        if self.op.force:
          self.warn.append(msg)
        else:
          raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)

    elif new_mode == constants.NIC_MODE_ROUTED:
      ip = params.get(constants.INIC_IP, old_ip)
      if ip is None:
        raise errors.OpPrereqError("Cannot set the NIC IP address to None"
                                   " on a routed NIC", errors.ECODE_INVAL)

    elif new_mode == constants.NIC_MODE_OVS:
      # TODO: check OVS link
      self.LogInfo("OVS links are currently not checked for correctness")

    if constants.INIC_MAC in params:
      mac = params[constants.INIC_MAC]
      if mac is None:
        raise errors.OpPrereqError("Cannot unset the NIC MAC address",
                                   errors.ECODE_INVAL)
      elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        # otherwise generate the MAC address
        params[constants.INIC_MAC] = \
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
      else:
        # or validate/reserve the current one
        try:
          self.cfg.ReserveMAC(mac, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("MAC address '%s' already in use"
                                     " in cluster" % mac,
                                     errors.ECODE_NOTUNIQUE)
    elif new_net_uuid != old_net_uuid:

      def get_net_prefix(net_uuid):
        mac_prefix = None
        if net_uuid:
          nobj = self.cfg.GetNetwork(net_uuid)
          mac_prefix = nobj.mac_prefix

        return mac_prefix

      new_prefix = get_net_prefix(new_net_uuid)
      old_prefix = get_net_prefix(old_net_uuid)
      if old_prefix != new_prefix:
        params[constants.INIC_MAC] = \
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())

    # if there is a change in (ip, network) tuple
    new_ip = params.get(constants.INIC_IP, old_ip)
    if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
      if new_ip:
        # if IP is pool then require a network and generate one IP
        if new_ip.lower() == constants.NIC_IP_POOL:
          if new_net_uuid:
            try:
              new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("Unable to get a free IP"
                                         " from the address pool",
                                         errors.ECODE_STATE)
            self.LogInfo("Chose IP %s from network %s",
                         new_ip,
                         new_net_obj.name)
            params[constants.INIC_IP] = new_ip
          else:
            raise errors.OpPrereqError("ip=pool, but no network found",
                                       errors.ECODE_INVAL)
        # Reserve new IP if in the new network if any
        elif new_net_uuid:
          try:
            self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId())
            self.LogInfo("Reserving IP %s in network %s",
                         new_ip, new_net_obj.name)
          except errors.ReservationError:
            raise errors.OpPrereqError("IP %s not available in network %s" %
                                       (new_ip, new_net_obj.name),
                                       errors.ECODE_NOTUNIQUE)
        # new network is None so check if new IP is a conflicting IP
        elif self.op.conflicts_check:
          _CheckForConflictingIp(self, new_ip, pnode_uuid)

      # release old IP if old network is not None
      if old_ip and old_net_uuid:
        try:
          self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId())
        except errors.AddressPoolError:
          logging.warning("Release IP %s not contained in network %s",
                          old_ip, old_net_obj.name)

    # there are no changes in (ip, network) tuple and old network is not None
    elif (old_net_uuid is not None and
          (req_link is not None or req_mode is not None)):
      raise errors.OpPrereqError("Not allowed to change link or mode of"
                                 " a NIC that is connected to a network",
                                 errors.ECODE_INVAL)

    private.params = new_params
    private.filled = new_filled_params

  def _PreCheckDiskTemplate(self, pnode_info):
    """CheckPrereq checks related to a new disk template."""
    # Arguments are passed to avoid configuration lookups
    pnode_uuid = self.instance.primary_node
    if self.instance.disk_template == self.op.disk_template:
      raise errors.OpPrereqError("Instance already has disk template %s" %
                                 self.instance.disk_template,
                                 errors.ECODE_INVAL)

    if not self.cluster.IsDiskTemplateEnabled(self.op.disk_template):
      raise errors.OpPrereqError("Disk template '%s' is not enabled for this"
                                 " cluster." % self.op.disk_template)

    if (self.instance.disk_template,
        self.op.disk_template) not in self._DISK_CONVERSIONS:
      raise errors.OpPrereqError("Unsupported disk template conversion from"
                                 " %s to %s" % (self.instance.disk_template,
                                                self.op.disk_template),
                                 errors.ECODE_INVAL)
    CheckInstanceState(self, self.instance, INSTANCE_DOWN,
                       msg="cannot change disk template")
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.remote_node_uuid == pnode_uuid:
        raise errors.OpPrereqError("Given new secondary node %s is the same"
                                   " as the primary node of the instance" %
                                   self.op.remote_node, errors.ECODE_STATE)
      CheckNodeOnline(self, self.op.remote_node_uuid)
      CheckNodeNotDrained(self, self.op.remote_node_uuid)
      # FIXME: here we assume that the old instance type is DT_PLAIN
      assert self.instance.disk_template == constants.DT_PLAIN
      disks = [{constants.IDISK_SIZE: d.size,
                constants.IDISK_VG: d.logical_id[0]}
               for d in self.instance.disks]
      required = ComputeDiskSizePerVG(self.op.disk_template, disks)
      CheckNodesFreeDiskPerVG(self, [self.op.remote_node_uuid], required)

      snode_info = self.cfg.GetNodeInfo(self.op.remote_node_uuid)
      snode_group = self.cfg.GetNodeGroup(snode_info.group)
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
                                                              snode_group)
      CheckTargetNodeIPolicy(self, ipolicy, self.instance, snode_info, self.cfg,
                             ignore=self.op.ignore_ipolicy)
      if pnode_info.group != snode_info.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    if not self.op.disk_template in constants.DTS_EXCL_STORAGE:
      # Make sure none of the nodes require exclusive storage
      nodes = [pnode_info]
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        assert snode_info
        nodes.append(snode_info)
      has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
      if compat.any(map(has_es, nodes)):
        errmsg = ("Cannot convert disk template from %s to %s when exclusive"
                  " storage is enabled" % (self.instance.disk_template,
                                           self.op.disk_template))
        raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)

  def _PreCheckDisks(self, ispec):
    """CheckPrereq checks related to disk changes.

    @type ispec: dict
    @param ispec: instance specs to be updated with the new disks

    """
    self.diskparams = self.cfg.GetInstanceDiskParams(self.instance)

    excl_stor = compat.any(
      rpc.GetExclusiveStorageForNodes(self.cfg,
                                      self.instance.all_nodes).values()
      )

    # Check disk modifications. This is done here and not in CheckArguments
    # (as with NICs), because we need to know the instance's disk template
    ver_fn = lambda op, par: self._VerifyDiskModification(op, par, excl_stor)
    if self.instance.disk_template == constants.DT_EXT:
      self._CheckMods("disk", self.op.disks, {}, ver_fn)
    else:
      self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
                      ver_fn)

    self.diskmod = _PrepareContainerMods(self.op.disks, None)

    # Check the validity of the `provider' parameter
    if self.instance.disk_template in constants.DT_EXT:
      for mod in self.diskmod:
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
        if mod[0] == constants.DDM_ADD:
          if ext_provider is None:
            raise errors.OpPrereqError("Instance template is '%s' and parameter"
                                       " '%s' missing, during disk add" %
                                       (constants.DT_EXT,
                                        constants.IDISK_PROVIDER),
                                       errors.ECODE_NOENT)
        elif mod[0] == constants.DDM_MODIFY:
          if ext_provider:
            raise errors.OpPrereqError("Parameter '%s' is invalid during disk"
                                       " modification" %
                                       constants.IDISK_PROVIDER,
                                       errors.ECODE_INVAL)
    else:
      for mod in self.diskmod:
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
        if ext_provider is not None:
          raise errors.OpPrereqError("Parameter '%s' is only valid for"
                                     " instances of type '%s'" %
                                     (constants.IDISK_PROVIDER,
                                      constants.DT_EXT),
                                     errors.ECODE_INVAL)

    if not self.op.wait_for_sync and self.instance.disks_active:
      for mod in self.diskmod:
        if mod[0] == constants.DDM_ADD:
          raise errors.OpPrereqError("Can't add a disk to an instance with"
                                     " activated disks and"
                                     " --no-wait-for-sync given.",
                                     errors.ECODE_INVAL)

    if self.op.disks and self.instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Disk operations not supported for"
                                 " diskless instances", errors.ECODE_INVAL)

    def _PrepareDiskMod(_, disk, params, __):
      disk.name = params.get(constants.IDISK_NAME, None)

    # Verify disk changes (operating on a copy)
    disks = copy.deepcopy(self.instance.disks)
    _ApplyContainerMods("disk", disks, None, self.diskmod, None,
                        _PrepareDiskMod, None)
    utils.ValidateDeviceNames("disk", disks)
    if len(disks) > constants.MAX_DISKS:
      raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
                                 " more" % constants.MAX_DISKS,
                                 errors.ECODE_STATE)
    disk_sizes = [disk.size for disk in self.instance.disks]
    disk_sizes.extend(params["size"] for (op, idx, params, private) in
                      self.diskmod if op == constants.DDM_ADD)
    ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
    ispec[constants.ISPEC_DISK_SIZE] = disk_sizes

    if self.op.offline is not None and self.op.offline:
      CheckInstanceState(self, self.instance, CAN_CHANGE_INSTANCE_OFFLINE,
                         msg="can't change to offline")

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the requested changes against the instance's current
    state and the cluster configuration.

    """
    assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    self.cluster = self.cfg.GetClusterInfo()
    cluster_hvparams = self.cluster.hvparams[self.instance.hypervisor]

    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    pnode_uuid = self.instance.primary_node

    self.warn = []

    if (self.op.pnode_uuid is not None and self.op.pnode_uuid != pnode_uuid and
        not self.op.force):
      # verify that the instance is not up
      instance_info = self.rpc.call_instance_info(
          pnode_uuid, self.instance.name, self.instance.hypervisor,
          cluster_hvparams)
      if instance_info.fail_msg:
        self.warn.append("Can't get instance runtime information: %s" %
                         instance_info.fail_msg)
      elif instance_info.payload:
        raise errors.OpPrereqError("Instance is still running on %s" %
                                   self.cfg.GetNodeName(pnode_uuid),
                                   errors.ECODE_STATE)

    assert pnode_uuid in self.owned_locks(locking.LEVEL_NODE)
    node_uuids = list(self.instance.all_nodes)
    pnode_info = self.cfg.GetNodeInfo(pnode_uuid)

    #_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
    assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
    group_info = self.cfg.GetNodeGroup(pnode_info.group)

    # dictionary with instance information after the modification
    ispec = {}

    if self.op.hotplug:
      result = self.rpc.call_hotplug_supported(self.instance.primary_node,
                                               self.instance)
      result.Raise("Hotplug is not supported.")

    # Prepare NIC modifications
    self.nicmod = _PrepareContainerMods(self.op.nics, _InstNicModPrivate)

    # OS change
    if self.op.os_name and not self.op.force:
      CheckNodeHasOS(self, self.instance.primary_node, self.op.os_name,
                     self.op.force_variant)
      instance_os = self.op.os_name
    else:
      instance_os = self.instance.os

    assert not (self.op.disk_template and self.op.disks), \
      "Can't modify disk template and apply disk changes at the same time"

    if self.op.disk_template:
      self._PreCheckDiskTemplate(pnode_info)

    self._PreCheckDisks(ispec)

    # hvparams processing
    if self.op.hvparams:
      hv_type = self.instance.hypervisor
      i_hvdict = GetUpdatedParams(self.instance.hvparams, self.op.hvparams)
      utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
      hv_new = self.cluster.SimpleFillHV(hv_type, self.instance.os, i_hvdict)

      # local check
      hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
      CheckHVParams(self, node_uuids, self.instance.hypervisor, hv_new)
      self.hv_proposed = self.hv_new = hv_new # the new actual values
      self.hv_inst = i_hvdict # the new dict (without defaults)
    else:
      self.hv_proposed = self.cluster.SimpleFillHV(self.instance.hypervisor,
                                                   self.instance.os,
                                                   self.instance.hvparams)
      self.hv_new = self.hv_inst = {}

    # beparams processing
    if self.op.beparams:
      i_bedict = GetUpdatedParams(self.instance.beparams, self.op.beparams,
                                  use_none=True)
      objects.UpgradeBeParams(i_bedict)
      utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
      be_new = self.cluster.SimpleFillBE(i_bedict)
      self.be_proposed = self.be_new = be_new # the new actual values
      self.be_inst = i_bedict # the new dict (without defaults)
    else:
      self.be_new = self.be_inst = {}
      self.be_proposed = self.cluster.SimpleFillBE(self.instance.beparams)
    be_old = self.cluster.FillBE(self.instance)

    # CPU param validation -- checking every time a parameter is
    # changed to cover all cases where either CPU mask or vcpus have
    # changed
    if (constants.BE_VCPUS in self.be_proposed and
        constants.HV_CPU_MASK in self.hv_proposed):
      cpu_list = \
        utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
      # Verify mask is consistent with number of vCPUs. Can skip this
      # test if only 1 entry in the CPU mask, which means same mask
      # is applied to all vCPUs.
      if (len(cpu_list) > 1 and
          len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
        raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
                                   " CPU mask [%s]" %
                                   (self.be_proposed[constants.BE_VCPUS],
                                    self.hv_proposed[constants.HV_CPU_MASK]),
                                   errors.ECODE_INVAL)

      # Only perform this test if a new CPU mask is given
      if constants.HV_CPU_MASK in self.hv_new:
        # Calculate the largest CPU number requested
        max_requested_cpu = max(map(max, cpu_list))
        # Check that all of the instance's nodes have enough physical CPUs to
        # satisfy the requested CPU mask
        hvspecs = [(self.instance.hypervisor,
                    self.cfg.GetClusterInfo()
                      .hvparams[self.instance.hypervisor])]
        _CheckNodesPhysicalCPUs(self, self.instance.all_nodes,
                                max_requested_cpu + 1,
                                hvspecs)

    # osparams processing
    if self.op.osparams:
      i_osdict = GetUpdatedParams(self.instance.osparams, self.op.osparams)
      CheckOSParams(self, True, node_uuids, instance_os, i_osdict)
      self.os_inst = i_osdict # the new dict (without defaults)
    else:
      self.os_inst = {}

    #TODO(dynmem): do the appropriate check involving MINMEM
    if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
        be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
      mem_check_list = [pnode_uuid]
      if be_new[constants.BE_AUTO_BALANCE]:
        # either we changed auto_balance to yes or it was from before
        mem_check_list.extend(self.instance.secondary_nodes)
      instance_info = self.rpc.call_instance_info(
          pnode_uuid, self.instance.name, self.instance.hypervisor,
          cluster_hvparams)
      hvspecs = [(self.instance.hypervisor,
                  cluster_hvparams)]
      nodeinfo = self.rpc.call_node_info(mem_check_list, None,
                                         hvspecs)
      pninfo = nodeinfo[pnode_uuid]
      msg = pninfo.fail_msg
      if msg:
        # Assume the primary node is unreachable and go ahead
        self.warn.append("Can't get info from primary node %s: %s" %
                         (self.cfg.GetNodeName(pnode_uuid), msg))
      else:
        (_, _, (pnhvinfo, )) = pninfo.payload
        if not isinstance(pnhvinfo.get("memory_free", None), int):
          self.warn.append("Node data from primary node %s doesn't contain"
                           " free memory information" %
                           self.cfg.GetNodeName(pnode_uuid))
        elif instance_info.fail_msg:
          self.warn.append("Can't get instance runtime information: %s" %
                           instance_info.fail_msg)
        else:
          if instance_info.payload:
            current_mem = int(instance_info.payload["memory"])
          else:
            # Assume instance not running
            # (there is a slight race condition here, but it's not very
            # probable, and we have no other way to check)
            # TODO: Describe race condition
            current_mem = 0
          #TODO(dynmem): do the appropriate check involving MINMEM
          miss_mem = (be_new[constants.BE_MAXMEM] - current_mem -
                      pnhvinfo["memory_free"])
          if miss_mem > 0:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from starting, due to %d MB of memory"
                                       " missing on its primary node" %
                                       miss_mem, errors.ECODE_NORES)

      if be_new[constants.BE_AUTO_BALANCE]:
        for node_uuid, nres in nodeinfo.items():
          if node_uuid not in self.instance.secondary_nodes:
            continue
          nres.Raise("Can't get info from secondary node %s" %
                     self.cfg.GetNodeName(node_uuid), prereq=True,
                     ecode=errors.ECODE_STATE)
          (_, _, (nhvinfo, )) = nres.payload
          if not isinstance(nhvinfo.get("memory_free", None), int):
            raise errors.OpPrereqError("Secondary node %s didn't return free"
                                       " memory information" %
                                       self.cfg.GetNodeName(node_uuid),
                                       errors.ECODE_STATE)
          #TODO(dynmem): do the appropriate check involving MINMEM
          elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from failover to its secondary node"
                                       " %s, due to not enough memory" %
                                       self.cfg.GetNodeName(node_uuid),
                                       errors.ECODE_STATE)

    if self.op.runtime_mem:
      remote_info = self.rpc.call_instance_info(
         self.instance.primary_node, self.instance.name,
         self.instance.hypervisor,
         cluster_hvparams)
      remote_info.Raise("Error checking node %s" %
                        self.cfg.GetNodeName(self.instance.primary_node))
      if not remote_info.payload: # not running already
        raise errors.OpPrereqError("Instance %s is not running" %
                                   self.instance.name, errors.ECODE_STATE)

      current_memory = remote_info.payload["memory"]
      if (not self.op.force and
           (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
            self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
        raise errors.OpPrereqError("Instance %s must have memory between %d"
                                   " and %d MB of memory unless --force is"
                                   " given" %
                                   (self.instance.name,
                                    self.be_proposed[constants.BE_MINMEM],
                                    self.be_proposed[constants.BE_MAXMEM]),
                                   errors.ECODE_INVAL)

      delta = self.op.runtime_mem - current_memory
      if delta > 0:
        CheckNodeFreeMemory(
            self, self.instance.primary_node,
            "ballooning memory for instance %s" % self.instance.name, delta,
            self.instance.hypervisor,
            self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])

    # make self.cluster visible in the functions below
    cluster = self.cluster

    def _PrepareNicCreate(_, params, private):
      self._PrepareNicModification(params, private, None, None,
                                   {}, cluster, pnode_uuid)
      return (None, None)

    def _PrepareNicMod(_, nic, params, private):
      self._PrepareNicModification(params, private, nic.ip, nic.network,
                                   nic.nicparams, cluster, pnode_uuid)
      return None

    def _PrepareNicRemove(_, params, __):
      ip = params.ip
      net = params.network
      if net is not None and ip is not None:
        self.cfg.ReleaseIp(net, ip, self.proc.GetECId())

    # Verify NIC changes (operating on copy)
    nics = self.instance.nics[:]
    _ApplyContainerMods("NIC", nics, None, self.nicmod,
                        _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
    if len(nics) > constants.MAX_NICS:
      raise errors.OpPrereqError("Instance has too many network interfaces"
                                 " (%d), cannot add more" % constants.MAX_NICS,
                                 errors.ECODE_STATE)

    # Pre-compute NIC changes (necessary to use result in hooks)
    self._nic_chgdesc = []
    if self.nicmod:
      # Operate on copies as this is still in prereq
      nics = [nic.Copy() for nic in self.instance.nics]
      _ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
                          self._CreateNewNic, self._ApplyNicMods,
                          self._RemoveNic)
      # Verify that NIC names are unique and valid
      utils.ValidateDeviceNames("NIC", nics)
      self._new_nics = nics
      ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
    else:
      self._new_nics = None
      ispec[constants.ISPEC_NIC_COUNT] = len(self.instance.nics)

    if not self.op.ignore_ipolicy:
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
                                                              group_info)

      # Fill ispec with backend parameters
      ispec[constants.ISPEC_SPINDLE_USE] = \
        self.be_new.get(constants.BE_SPINDLE_USE, None)
      ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS,
                                                         None)

      # Copy ispec to verify parameters with min/max values separately
      if self.op.disk_template:
        new_disk_template = self.op.disk_template
      else:
        new_disk_template = self.instance.disk_template
      ispec_max = ispec.copy()
      ispec_max[constants.ISPEC_MEM_SIZE] = \
        self.be_new.get(constants.BE_MAXMEM, None)
      res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,
                                                     new_disk_template)
      ispec_min = ispec.copy()
      ispec_min[constants.ISPEC_MEM_SIZE] = \
        self.be_new.get(constants.BE_MINMEM, None)
      res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,
                                                     new_disk_template)

      if (res_max or res_min):
        # FIXME: Improve error message by including information about whether
        # the upper or lower limit of the parameter fails the ipolicy.
        msg = ("Instance allocation to group %s (%s) violates policy: %s" %
               (group_info, group_info.name,
                utils.CommaJoin(set(res_max + res_min))))
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

  def _ConvertPlainToDrbd(self, feedback_fn):
    """Converts an instance from plain to drbd.

    """
    feedback_fn("Converting template to drbd")
    pnode_uuid = self.instance.primary_node
    snode_uuid = self.op.remote_node_uuid

    assert self.instance.disk_template == constants.DT_PLAIN

    # create a fake disk info for _GenerateDiskTemplate
    disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
                  constants.IDISK_VG: d.logical_id[0],
                  constants.IDISK_NAME: d.name}
                 for d in self.instance.disks]
    new_disks = GenerateDiskTemplate(self, self.op.disk_template,
                                     self.instance.uuid, pnode_uuid,
                                     [snode_uuid], disk_info, None, None, 0,
                                     feedback_fn, self.diskparams)
    anno_disks = rpc.AnnotateDiskParams(new_disks, self.diskparams)
    p_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, pnode_uuid)
    s_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, snode_uuid)
    info = GetInstanceInfoText(self.instance)
    feedback_fn("Creating additional volumes...")
    # first, create the missing data and meta devices
    for disk in anno_disks:
      # unfortunately this is... not too nice
      CreateSingleBlockDev(self, pnode_uuid, self.instance, disk.children[1],
                           info, True, p_excl_stor)
      for child in disk.children:
        CreateSingleBlockDev(self, snode_uuid, self.instance, child, info, True,
                             s_excl_stor)
    # at this stage, all new LVs have been created, we can rename the
    # old ones
    feedback_fn("Renaming original volumes...")
    rename_list = [(o, n.children[0].logical_id)
                   for (o, n) in zip(self.instance.disks, new_disks)]
    result = self.rpc.call_blockdev_rename(pnode_uuid, rename_list)
    result.Raise("Failed to rename original LVs")

    feedback_fn("Initializing DRBD devices...")
    # all child devices are in place, we can now create the DRBD devices
    try:
      for disk in anno_disks:
        for (node_uuid, excl_stor) in [(pnode_uuid, p_excl_stor),
                                       (snode_uuid, s_excl_stor)]:
          f_create = node_uuid == pnode_uuid
          CreateSingleBlockDev(self, node_uuid, self.instance, disk, info,
                               f_create, excl_stor)
    except errors.GenericError, e:
      feedback_fn("Initializing of DRBD devices failed;"
                  " renaming back original volumes...")
      rename_back_list = [(n.children[0], o.logical_id)
                          for (n, o) in zip(new_disks, self.instance.disks)]
      result = self.rpc.call_blockdev_rename(pnode_uuid, rename_back_list)
      result.Raise("Failed to rename LVs back after error %s" % str(e))
      raise

    # at this point, the instance has been modified
    self.instance.disk_template = constants.DT_DRBD8
    self.instance.disks = new_disks
    self.cfg.Update(self.instance, feedback_fn)

    # Release node locks while waiting for sync
    ReleaseLocks(self, locking.LEVEL_NODE)

    # disks are created, waiting for sync
    disk_abort = not WaitForSync(self, self.instance,
                                 oneshot=not self.op.wait_for_sync)
    if disk_abort:
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance, please cleanup manually")

    # Node resource locks will be released by caller

  def _ConvertDrbdToPlain(self, feedback_fn):
    """Converts an instance from drbd to plain.

    """
    assert len(self.instance.secondary_nodes) == 1
    assert self.instance.disk_template == constants.DT_DRBD8

    pnode_uuid = self.instance.primary_node
    snode_uuid = self.instance.secondary_nodes[0]
    feedback_fn("Converting template to plain")

    old_disks = AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
    new_disks = [d.children[0] for d in self.instance.disks]

    # copy over size, mode and name
    for parent, child in zip(old_disks, new_disks):
      child.size = parent.size
      child.mode = parent.mode
      child.name = parent.name

    # this is a DRBD disk, return its port to the pool
    # NOTE: this must be done right before the call to cfg.Update!
    for disk in old_disks:
      tcp_port = disk.logical_id[2]
      self.cfg.AddTcpUdpPort(tcp_port)

    # update instance structure
    self.instance.disks = new_disks
    self.instance.disk_template = constants.DT_PLAIN
    _UpdateIvNames(0, self.instance.disks)
    self.cfg.Update(self.instance, feedback_fn)

    # Release locks in case removing disks takes a while
    ReleaseLocks(self, locking.LEVEL_NODE)

    feedback_fn("Removing volumes on the secondary node...")
    for disk in old_disks:
      result = self.rpc.call_blockdev_remove(snode_uuid, (disk, self.instance))
      result.Warn("Could not remove block device %s on node %s,"
                  " continuing anyway" %
                  (disk.iv_name, self.cfg.GetNodeName(snode_uuid)),
                  self.LogWarning)

    feedback_fn("Removing unneeded volumes on the primary node...")
    for idx, disk in enumerate(old_disks):
      meta = disk.children[1]
      result = self.rpc.call_blockdev_remove(pnode_uuid, (meta, self.instance))
      result.Warn("Could not remove metadata for disk %d on node %s,"
                  " continuing anyway" %
                  (idx, self.cfg.GetNodeName(pnode_uuid)),
                  self.LogWarning)

  def _HotplugDevice(self, action, dev_type, device, extra, seq):
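    """Issues the hotplug RPC for one device and reports the outcome.

    """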
    self.LogInfo("Trying to hotplug device...")
    msg = "hotplug:"
    result = self.rpc.call_hotplug_device(self.instance.primary_node,
                                          self.instance, action, dev_type,
                                          (device, self.instance),
                                          extra, seq)
    if result.fail_msg:
      self.LogWarning("Could not hotplug device: %s" % result.fail_msg)
      self.LogInfo("Continuing execution..")
      msg += "failed"
    else:
      self.LogInfo("Hotplug done.")
      msg += "done"
    return msg

  def _CreateNewDisk(self, idx, params, _):
    """Creates a new disk.

    """
    # add a new disk
    if self.instance.disk_template in constants.DTS_FILEBASED:
      (file_driver, file_path) = self.instance.disks[0].logical_id
      file_path = os.path.dirname(file_path)
    else:
      file_driver = file_path = None

    disk = \
      GenerateDiskTemplate(self, self.instance.disk_template,
                           self.instance.uuid, self.instance.primary_node,
                           self.instance.secondary_nodes, [params], file_path,
                           file_driver, idx, self.Log, self.diskparams)[0]

    new_disks = CreateDisks(self, self.instance, disks=[disk])

    if self.cluster.prealloc_wipe_disks:
      # Wipe new disk
      WipeOrCleanupDisks(self, self.instance,
                         disks=[(idx, disk, 0)],
                         cleanup=new_disks)

    changes = [
      ("disk/%d" % idx,
       "add:size=%s,mode=%s" % (disk.size, disk.mode)),
      ]
    if self.op.hotplug:
      result = self.rpc.call_blockdev_assemble(self.instance.primary_node,
                                               (disk, self.instance),
                                               self.instance.name, True, idx)
      if result.fail_msg:
        changes.append(("disk/%d" % idx, "assemble:failed"))
        self.LogWarning("Can't assemble newly created disk %d: %s",
                        idx, result.fail_msg)
      else:
        _, link_name = result.payload
        msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
                                  constants.HOTPLUG_TARGET_DISK,
                                  disk, link_name, idx)
        changes.append(("disk/%d" % idx, msg))

    return (disk, changes)

  def _PostAddDisk(self, _, disk):
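    """Waits for a newly added disk to sync.

    If the instance's disks are not supposed to be active, the disk is
    shut down again after the sync.

    """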
    if not WaitForSync(self, self.instance, disks=[disk],
                       oneshot=not self.op.wait_for_sync):
      raise errors.OpExecError("Failed to sync disks of %s" %
                               self.instance.name)

    # the disk is active at this point, so deactivate it if the instance disks
    # are supposed to be inactive
    if not self.instance.disks_active:
      ShutdownInstanceDisks(self, self.instance, disks=[disk])

  @staticmethod
  def _ModifyDisk(idx, disk, params, _):
    """Modifies a disk.

    """
    changes = []
    mode = params.get(constants.IDISK_MODE, None)
    if mode:
      disk.mode = mode
      changes.append(("disk.mode/%d" % idx, disk.mode))

    name = params.get(constants.IDISK_NAME, None)
    disk.name = name
    changes.append(("disk.name/%d" % idx, disk.name))

    return changes

  def _RemoveDisk(self, idx, root, _):
    """Removes a disk.

    """
    hotmsg = ""
    if self.op.hotplug:
      hotmsg = self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
                                   constants.HOTPLUG_TARGET_DISK,
                                   root, None, idx)
      ShutdownInstanceDisks(self, self.instance, [root])

    (anno_disk,) = AnnotateDiskParams(self.instance, [root], self.cfg)
    for node_uuid, disk in anno_disk.ComputeNodeTree(
                             self.instance.primary_node):
      msg = self.rpc.call_blockdev_remove(node_uuid, (disk, self.instance)) \
              .fail_msg
      if msg:
        self.LogWarning("Could not remove disk/%d on node '%s': %s,"
                        " continuing anyway", idx,
                        self.cfg.GetNodeName(node_uuid), msg)

    # if this is a DRBD disk, return its port to the pool
    if root.dev_type in constants.DTS_DRBD:
      self.cfg.AddTcpUdpPort(root.logical_id[2])

    return hotmsg

  def _CreateNewNic(self, idx, params, private):
    """Creates data structure for a new network interface.

    """
    mac = params[constants.INIC_MAC]
    ip = params.get(constants.INIC_IP, None)
    net = params.get(constants.INIC_NETWORK, None)
    name = params.get(constants.INIC_NAME, None)
    net_uuid = self.cfg.LookupNetwork(net)
    #TODO: not private.filled?? can a nic have no nicparams??
    nicparams = private.filled
    nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name,
                       nicparams=nicparams)
    nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())

    changes = [
      ("nic.%d" % idx,
       "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
       (mac, ip, private.filled[constants.NIC_MODE],
       private.filled[constants.NIC_LINK], net)),
      ]

    if self.op.hotplug:
      msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
                                constants.HOTPLUG_TARGET_NIC,
                                nobj, None, idx)
      changes.append(("nic.%d" % idx, msg))

    return (nobj, changes)

  def _ApplyNicMods(self, idx, nic, params, private):
    """Modifies a network interface.

    """
    changes = []

    for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:
      if key in params:
        changes.append(("nic.%s/%d" % (key, idx), params[key]))
        setattr(nic, key, params[key])

    new_net = params.get(constants.INIC_NETWORK, nic.network)
    new_net_uuid = self.cfg.LookupNetwork(new_net)
    if new_net_uuid != nic.network:
      changes.append(("nic.network/%d" % idx, new_net))
      nic.network = new_net_uuid

    if private.filled:
      nic.nicparams = private.filled

      for (key, val) in nic.nicparams.items():
        changes.append(("nic.%s/%d" % (key, idx), val))

    if self.op.hotplug:
      msg = self._HotplugDevice(constants.HOTPLUG_ACTION_MODIFY,
                                constants.HOTPLUG_TARGET_NIC,
                                nic, None, idx)
      changes.append(("nic/%d" % idx, msg))

    return changes

  def _RemoveNic(self, idx, nic, _):
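    """Hot-unplugs a NIC if hotplug was requested.

    """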
    if self.op.hotplug:
      return self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
                                 constants.HOTPLUG_TARGET_NIC,
                                 nic, None, idx)

  def Exec(self, feedback_fn):
    """Modifies an instance.

    All parameters take effect only at the next restart of the instance.

    """
    # Process here the warnings from CheckPrereq, as we don't have a
    # feedback_fn there.
    # TODO: Replace with self.LogWarning
    for warn in self.warn:
      feedback_fn("WARNING: %s" % warn)

    assert ((self.op.disk_template is None) ^
            bool(self.owned_locks(locking.LEVEL_NODE_RES))), \
      "Not owning any node resource locks"

    result = []

    # New primary node
    if self.op.pnode_uuid:
      self.instance.primary_node = self.op.pnode_uuid

    # runtime memory
    if self.op.runtime_mem:
      rpcres = self.rpc.call_instance_balloon_memory(self.instance.primary_node,
                                                     self.instance,
                                                     self.op.runtime_mem)
      rpcres.Raise("Cannot modify instance runtime memory")
      result.append(("runtime_memory", self.op.runtime_mem))

    # Apply disk changes
    _ApplyContainerMods("disk", self.instance.disks, result, self.diskmod,
                        self._CreateNewDisk, self._ModifyDisk,
                        self._RemoveDisk, post_add_fn=self._PostAddDisk)
    _UpdateIvNames(0, self.instance.disks)

    if self.op.disk_template:
      if __debug__:
        check_nodes = set(self.instance.all_nodes)
        if self.op.remote_node_uuid:
          check_nodes.add(self.op.remote_node_uuid)
        for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
          owned = self.owned_locks(level)
          assert not (check_nodes - owned), \
            ("Not owning the correct locks, owning %r, expected at least %r" %
             (owned, check_nodes))

      r_shut = ShutdownInstanceDisks(self, self.instance)
      if not r_shut:
        raise errors.OpExecError("Cannot shutdown instance disks, unable to"
                                 " proceed with disk template conversion")
      mode = (self.instance.disk_template, self.op.disk_template)
      try:
        self._DISK_CONVERSIONS[mode](self, feedback_fn)
      except:
        self.cfg.ReleaseDRBDMinors(self.instance.uuid)
        raise
      result.append(("disk_template", self.op.disk_template))

      assert self.instance.disk_template == self.op.disk_template, \
        ("Expected disk template '%s', found '%s'" %
         (self.op.disk_template, self.instance.disk_template))

    # Release node and resource locks if there are any (they might already have
    # been released during disk conversion)
    ReleaseLocks(self, locking.LEVEL_NODE)
    ReleaseLocks(self, locking.LEVEL_NODE_RES)

    # Apply NIC changes
    if self._new_nics is not None:
      self.instance.nics = self._new_nics
      result.extend(self._nic_chgdesc)

    # hvparams changes
    if self.op.hvparams:
      self.instance.hvparams = self.hv_inst
      for key, val in self.op.hvparams.iteritems():
        result.append(("hv/%s" % key, val))

    # beparams changes
    if self.op.beparams:
      self.instance.beparams = self.be_inst
      for key, val in self.op.beparams.iteritems():
        result.append(("be/%s" % key, val))

    # OS change
    if self.op.os_name:
      self.instance.os = self.op.os_name

    # osparams changes
    if self.op.osparams:
      self.instance.osparams = self.os_inst
      for key, val in self.op.osparams.iteritems():
        result.append(("os/%s" % key, val))

    if self.op.offline is None:
      # Ignore
      pass
    elif self.op.offline:
      # Mark instance as offline
      self.cfg.MarkInstanceOffline(self.instance.uuid)
      result.append(("admin_state", constants.ADMINST_OFFLINE))
    else:
      # Mark instance as online, but stopped
      self.cfg.MarkInstanceDown(self.instance.uuid)
      result.append(("admin_state", constants.ADMINST_DOWN))

    self.cfg.Update(self.instance, feedback_fn, self.proc.GetECId())

    assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
                self.owned_locks(locking.LEVEL_NODE)), \
      "All node locks should have been released by now"

    return result

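  # Supported disk template conversions: maps (current template, new
  # template) pairs to the method implementing the conversion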
  _DISK_CONVERSIONS = {
    (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
    (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
    }


class LUInstanceChangeGroup(LogicalUnit):
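  """Changes the node group of an instance.

  """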
  HPATH = "instance-change-group"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
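    """Locks the instance and prepares the remaining lock levels.

    """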
    self.share_locks = ShareAll()

    self.needed_locks = {
      locking.LEVEL_NODEGROUP: [],
      locking.LEVEL_NODE: [],
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
      }

    self._ExpandAndLockInstance()

    if self.op.target_groups:
      self.req_target_uuids = map(self.cfg.LookupNodeGroup,
                                  self.op.target_groups)
    else:
      self.req_target_uuids = None

    self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)

  def DeclareLocks(self, level):
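    """Locks the node groups and nodes the instance could be moved to.

    """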
    if level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]

      if self.req_target_uuids:
        lock_groups = set(self.req_target_uuids)

        # Lock all groups used by instance optimistically; this requires going
        # via the node before it's locked, requiring verification later on
        instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
        lock_groups.update(instance_groups)
      else:
        # No target groups, need to lock all of them
        lock_groups = locking.ALL_SET

      self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups

    elif level == locking.LEVEL_NODE:
      if self.req_target_uuids:
        # Lock all nodes used by instances
        self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
        self._LockInstancesNodes()

        # Lock all nodes in all potential target groups
        lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
                       self.cfg.GetInstanceNodeGroups(self.op.instance_uuid))
        member_nodes = [node_uuid
                        for group in lock_groups
                        for node_uuid in self.cfg.GetNodeGroup(group).members]
        self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
      else:
        # Lock all nodes as all groups are potential targets
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
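    """Check prerequisites.

    Verifies the optimistically acquired locks are still valid and
    computes the set of valid target groups.

    """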
    owned_instance_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))

    assert (self.req_target_uuids is None or
            owned_groups.issuperset(self.req_target_uuids))
    assert owned_instance_names == set([self.op.instance_name])

    # Get instance information
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)

    # Check if node groups for locked instance are still correct
    assert owned_nodes.issuperset(self.instance.all_nodes), \
      ("Instance %s's nodes changed while we kept the lock" %
       self.op.instance_name)

    inst_groups = CheckInstanceNodeGroups(self.cfg, self.op.instance_uuid,
                                          owned_groups)

    if self.req_target_uuids:
      # User requested specific target groups
      self.target_uuids = frozenset(self.req_target_uuids)
    else:
      # All groups except those used by the instance are potential targets
      self.target_uuids = owned_groups - inst_groups

    conflicting_groups = self.target_uuids & inst_groups
    if conflicting_groups:
      raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
                                 " used by the instance '%s'" %
                                 (utils.CommaJoin(conflicting_groups),
                                  self.op.instance_name),
                                 errors.ECODE_INVAL)

    if not self.target_uuids:
      raise errors.OpPrereqError("There are no possible target groups",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    assert self.target_uuids

    env = {
      "TARGET_GROUPS": " ".join(self.target_uuids),
      }

    env.update(BuildInstanceHookEnvByObject(self, self.instance))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()
    return ([mn], [mn])

  def Exec(self, feedback_fn):
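    """Asks the iallocator to compute the jobs needed for the group change.

    """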
    instances = list(self.owned_locks(locking.LEVEL_INSTANCE))

    assert instances == [self.op.instance_name], "Instance not locked"

    req = iallocator.IAReqGroupChange(instances=instances,
                                      target_groups=list(self.target_uuids))
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute solution for changing group of"
                                 " instance '%s' using iallocator '%s': %s" %
                                 (self.op.instance_name, self.op.iallocator,
                                  ial.info), errors.ECODE_NORES)

    jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False)

    self.LogInfo("Iallocator returned %s job(s) for changing group of"
                 " instance '%s'", len(jobs), self.op.instance_name)

    return ResultWithJobs(jobs)