#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Logical units dealing with instances."""

import OpenSSL
import copy
import logging
import os

from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import ht
from ganeti import hypervisor
from ganeti import locking
from ganeti.masterd import iallocator
from ganeti import masterd
from ganeti import netutils
from ganeti import objects
from ganeti import pathutils
from ganeti import serializer
import ganeti.rpc.node as rpc
from ganeti import utils

from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs

from ganeti.cmdlib.common import INSTANCE_DOWN, \
  INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, CheckNodeOnline, \
  ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
  LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
  IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
  AnnotateDiskParams, GetUpdatedParams, ExpandInstanceUuidAndName, \
  ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeUuidAndName, \
  CheckDiskTemplateEnabled, IsValidDiskAccessModeCombination
from ganeti.cmdlib.instance_storage import CreateDisks, \
  CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, WaitForSync, \
  IsExclusiveStorageEnabledNodeUuid, CreateSingleBlockDev, ComputeDisks, \
  CheckRADOSFreeSpace, ComputeDiskSizePerVG, GenerateDiskTemplate, \
  StartInstanceDisks, ShutdownInstanceDisks, AssembleInstanceDisks, \
  CheckSpindlesExclusiveStorage
from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
  GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
  NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
  ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
  GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
  CheckInstanceBridgesExist, CheckNicsBridgesExist, CheckNodeHasOS

import ganeti.masterd.instance


#: Type description for changes as returned by L{_ApplyContainerMods}'s
#: callbacks
_TApplyContModsCbChanges = \
  ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
    ht.TNonEmptyString,
    ht.TAny,
    ])))

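# Illustrative example (editor's note, names are hypothetical): a requested
# name "web1" that resolves to "web1.example.com" passes the check in
# _CheckHostnameSane below, because the given name is a prefix component of
# the resolved FQDN; a resolution to "db1.example.com" is rejected with
# OpPrereqError.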
def _CheckHostnameSane(lu, name):
  """Ensures that a given hostname resolves to a 'sane' name.

  The given name is required to be a prefix of the resolved hostname,
  to prevent accidental mismatches.

  @param lu: the logical unit on behalf of which we're checking
  @param name: the name we should resolve and check
  @return: the resolved hostname object

  """
  hostname = netutils.GetHostname(name=name)
  if hostname.name != name:
    lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name)
  if not utils.MatchNameComponent(name, [hostname.name]):
    raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
                                " same as given hostname '%s'") %
                               (hostname.name, name), errors.ECODE_INVAL)
  return hostname


def _CheckOpportunisticLocking(op):
  """Generate error if opportunistic locking is not possible.

  """
  if op.opportunistic_locking and not op.iallocator:
    raise errors.OpPrereqError("Opportunistic locking is only available in"
                               " combination with an instance allocator",
                               errors.ECODE_INVAL)


def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_name_whitelist):
  """Wrapper around IAReqInstanceAlloc.

  @param op: The instance opcode
  @param disks: The computed disks
  @param nics: The computed nics
  @param beparams: The fully filled beparams
  @param node_name_whitelist: List of nodes which should appear as online to the
    allocator (unless the node is already marked offline)

  @returns: A filled L{iallocator.IAReqInstanceAlloc}

  """
  spindle_use = beparams[constants.BE_SPINDLE_USE]
  return iallocator.IAReqInstanceAlloc(name=op.instance_name,
                                       disk_template=op.disk_template,
                                       tags=op.tags,
                                       os=op.os_type,
                                       vcpus=beparams[constants.BE_VCPUS],
                                       memory=beparams[constants.BE_MAXMEM],
                                       spindle_use=spindle_use,
                                       disks=disks,
                                       nics=[n.ToDict() for n in nics],
                                       hypervisor=op.hypervisor,
                                       node_whitelist=node_name_whitelist)


def _ComputeFullBeParams(op, cluster):
  """Computes the full beparams.

  @param op: The instance opcode
  @param cluster: The cluster config object

  @return: The fully filled beparams

  """
  default_beparams = cluster.beparams[constants.PP_DEFAULT]
  for param, value in op.beparams.iteritems():
    if value == constants.VALUE_AUTO:
      op.beparams[param] = default_beparams[param]
  objects.UpgradeBeParams(op.beparams)
  utils.ForceDictType(op.beparams, constants.BES_PARAMETER_TYPES)
  return cluster.SimpleFillBE(op.beparams)

def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
  """Computes the nics.

  @param op: The instance opcode
  @param cluster: Cluster configuration object
  @param default_ip: The default ip to assign
  @param cfg: An instance of the configuration object
  @param ec_id: Execution context ID

  @returns: The built up NICs

  """
  nics = []
  for nic in op.nics:
    nic_mode_req = nic.get(constants.INIC_MODE, None)
    nic_mode = nic_mode_req
    if nic_mode is None or nic_mode == constants.VALUE_AUTO:
      nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]

    net = nic.get(constants.INIC_NETWORK, None)
    link = nic.get(constants.NIC_LINK, None)
    ip = nic.get(constants.INIC_IP, None)
    vlan = nic.get(constants.INIC_VLAN, None)

    if net is None or net.lower() == constants.VALUE_NONE:
      net = None
    else:
      if nic_mode_req is not None or link is not None:
        raise errors.OpPrereqError("If network is given, no mode or link"
                                   " is allowed to be passed",
                                   errors.ECODE_INVAL)

    # ip validity checks
    if ip is None or ip.lower() == constants.VALUE_NONE:
      nic_ip = None
    elif ip.lower() == constants.VALUE_AUTO:
      if not op.name_check:
        raise errors.OpPrereqError("IP address set to auto but name checks"
                                   " have been skipped",
                                   errors.ECODE_INVAL)
      nic_ip = default_ip
    else:
      # We defer pool operations until later, so that the iallocator has
      # filled in the instance's node(s) dimara
      if ip.lower() == constants.NIC_IP_POOL:
        if net is None:
          raise errors.OpPrereqError("if ip=pool, parameter network"
                                     " must be passed too",
                                     errors.ECODE_INVAL)

      elif not netutils.IPAddress.IsValid(ip):
        raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                   errors.ECODE_INVAL)

      nic_ip = ip

    # TODO: check the ip address for uniqueness
    if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
      raise errors.OpPrereqError("Routed nic mode requires an ip address",
                                 errors.ECODE_INVAL)

    # MAC address verification
    mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
    if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
      mac = utils.NormalizeAndValidateMac(mac)

      try:
        # TODO: We need to factor this out
        cfg.ReserveMAC(mac, ec_id)
      except errors.ReservationError:
        raise errors.OpPrereqError("MAC address %s already in use"
                                   " in cluster" % mac,
                                   errors.ECODE_NOTUNIQUE)

    #  Build nic parameters
    nicparams = {}
    if nic_mode_req:
      nicparams[constants.NIC_MODE] = nic_mode
    if link:
      nicparams[constants.NIC_LINK] = link
    if vlan:
      nicparams[constants.NIC_VLAN] = vlan

    check_params = cluster.SimpleFillNIC(nicparams)
    objects.NIC.CheckParameterSyntax(check_params)
    net_uuid = cfg.LookupNetwork(net)
    name = nic.get(constants.INIC_NAME, None)
    if name is not None and name.lower() == constants.VALUE_NONE:
      name = None
    nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name,
                          network=net_uuid, nicparams=nicparams)
    nic_obj.uuid = cfg.GenerateUniqueID(ec_id)
    nics.append(nic_obj)

  return nics


def _CheckForConflictingIp(lu, ip, node_uuid):
  """In case of conflicting IP address raise error.

  @type ip: string
  @param ip: IP address
  @type node_uuid: string
  @param node_uuid: node UUID

  """
  (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node_uuid)
  if conf_net is not None:
    raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
                                " network %s, but the target NIC does not." %
                                (ip, conf_net)),
                               errors.ECODE_STATE)

  return (None, None)

def _ComputeIPolicyInstanceSpecViolation(
  ipolicy, instance_spec, disk_template,
  _compute_fn=ComputeIPolicySpecViolation):
  """Compute if instance specs meet the specs of ipolicy.

  @type ipolicy: dict
  @param ipolicy: The ipolicy to verify against
  @type instance_spec: dict
  @param instance_spec: The instance spec to verify
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{ComputeIPolicySpecViolation}

  """
  mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
  cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
  disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
  disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
  nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
  spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)

  return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
                     disk_sizes, spindle_use, disk_template)

def _ComputeInstanceCommunicationNIC(instance_name):
  """Compute the name of the instance NIC used by instance
  communication.

  With instance communication, a new NIC is added to the instance.
  This NIC has a special name that identifies it as being part of
  instance communication, and not just a normal NIC.  This function
  generates the name of the NIC based on a prefix and the instance
  name.

  @type instance_name: string
  @param instance_name: name of the instance the NIC belongs to

  @rtype: string
  @return: name of the NIC

  """
  return constants.INSTANCE_COMMUNICATION_NIC_PREFIX + instance_name

class LUInstanceCreate(LogicalUnit):
  """Create an instance.

  """
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def _CheckDiskTemplateValid(self):
    """Checks validity of disk template.

    """
    cluster = self.cfg.GetClusterInfo()
    if self.op.disk_template is None:
      # FIXME: It would be better to take the default disk template from the
      # ipolicy, but for the ipolicy we need the primary node, which we get from
      # the iallocator, which wants the disk template as input. To solve this
      # chicken-and-egg problem, it should be possible to specify just a node
      # group from the iallocator and take the ipolicy from that.
      self.op.disk_template = cluster.enabled_disk_templates[0]
    CheckDiskTemplateEnabled(cluster, self.op.disk_template)

  def _CheckDiskArguments(self):
    """Checks validity of disk-related arguments.

    """
    # check that disk's names are unique and valid
    utils.ValidateDeviceNames("disk", self.op.disks)

    self._CheckDiskTemplateValid()

    # check disks. parameter names and consistent adopt/no-adopt strategy
    has_adopt = has_no_adopt = False
    for disk in self.op.disks:
      if self.op.disk_template != constants.DT_EXT:
        utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
      if constants.IDISK_ADOPT in disk:
        has_adopt = True
      else:
        has_no_adopt = True
    if has_adopt and has_no_adopt:
      raise errors.OpPrereqError("Either all disks are adopted or none is",
                                 errors.ECODE_INVAL)
    if has_adopt:
      if self.op.disk_template not in constants.DTS_MAY_ADOPT:
        raise errors.OpPrereqError("Disk adoption is not supported for the"
                                   " '%s' disk template" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)
      if self.op.iallocator is not None:
        raise errors.OpPrereqError("Disk adoption not allowed with an"
                                   " iallocator script", errors.ECODE_INVAL)
      if self.op.mode == constants.INSTANCE_IMPORT:
        raise errors.OpPrereqError("Disk adoption not allowed for"
                                   " instance import", errors.ECODE_INVAL)
    else:
      if self.op.disk_template in constants.DTS_MUST_ADOPT:
        raise errors.OpPrereqError("Disk template %s requires disk adoption,"
                                   " but no 'adopt' parameter given" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)

    self.adopt_disks = has_adopt

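  # VLAN syntax accepted by the check below (illustrative summary): a bare
  # number such as "1" is normalized to ".1" (untagged access port), ".100"
  # is a single untagged VLAN optionally followed by a trunk list
  # (".100:200:300"), and ":100:200" is a trunk carrying tagged VLANs only;
  # anything else is rejected with OpPrereqError.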
  def _CheckVLANArguments(self):
    """ Check validity of VLANs if given

    """
    for nic in self.op.nics:
      vlan = nic.get(constants.INIC_VLAN, None)
      if vlan:
        if vlan[0] == ".":
          # vlan starting with dot means single untagged vlan,
          # might be followed by trunk (:)
          if not vlan[1:].isdigit():
            vlanlist = vlan[1:].split(':')
            for vl in vlanlist:
              if not vl.isdigit():
                raise errors.OpPrereqError("Specified VLAN parameter is "
                                           "invalid : %s" % vlan,
                                             errors.ECODE_INVAL)
        elif vlan[0] == ":":
          # Trunk - tagged only
          vlanlist = vlan[1:].split(':')
          for vl in vlanlist:
            if not vl.isdigit():
              raise errors.OpPrereqError("Specified VLAN parameter is invalid"
                                           " : %s" % vlan, errors.ECODE_INVAL)
        elif vlan.isdigit():
          # This is the simplest case. No dots, only single digit
          # -> Create untagged access port, dot needs to be added
          nic[constants.INIC_VLAN] = "." + vlan
        else:
          raise errors.OpPrereqError("Specified VLAN parameter is invalid"
                                       " : %s" % vlan, errors.ECODE_INVAL)

  def CheckArguments(self):
    """Check arguments.

    """
    # do not require name_check to ease forward/backward compatibility
    # for tools
    if self.op.no_install and self.op.start:
      self.LogInfo("No-installation mode selected, disabling startup")
      self.op.start = False
    # validate/normalize the instance name
    self.op.instance_name = \
      netutils.Hostname.GetNormalizedName(self.op.instance_name)

    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("Cannot do IP address check without a name"
                                 " check", errors.ECODE_INVAL)

    # add NIC for instance communication
    if self.op.instance_communication:
      nic_name = _ComputeInstanceCommunicationNIC(self.op.instance_name)

      self.op.nics.append({constants.INIC_NAME: nic_name,
                           constants.INIC_MAC: constants.VALUE_GENERATE,
                           constants.INIC_IP: constants.NIC_IP_POOL,
                           constants.INIC_NETWORK:
                             self.cfg.GetInstanceCommunicationNetwork()})

    # check nics' parameter names
    for nic in self.op.nics:
      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
    # check that NIC's parameters names are unique and valid
    utils.ValidateDeviceNames("NIC", self.op.nics)

    self._CheckVLANArguments()

    self._CheckDiskArguments()
    assert self.op.disk_template is not None

    # instance name verification
    if self.op.name_check:
      self.hostname = _CheckHostnameSane(self, self.op.instance_name)
      self.op.instance_name = self.hostname.name
      # used in CheckPrereq for ip ping check
      self.check_ip = self.hostname.ip
    else:
      self.check_ip = None

    # file storage checks
    if (self.op.file_driver and
        not self.op.file_driver in constants.FILE_DRIVER):
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                 self.op.file_driver, errors.ECODE_INVAL)

    # set default file_driver if unset and required
    if (not self.op.file_driver and
        self.op.disk_template in constants.DTS_FILEBASED):
      self.op.file_driver = constants.FD_LOOP

    ### Node/iallocator related checks
    CheckIAllocatorOrNode(self, "iallocator", "pnode")

    if self.op.pnode is not None:
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        if self.op.snode is None:
          raise errors.OpPrereqError("The networked disk templates need"
                                     " a mirror node", errors.ECODE_INVAL)
      elif self.op.snode:
        self.LogWarning("Secondary node will be ignored on non-mirrored disk"
                        " template")
        self.op.snode = None

    _CheckOpportunisticLocking(self.op)

    if self.op.mode == constants.INSTANCE_IMPORT:
      # On import force_variant must be True, because if we forced it at
      # initial install, our only chance when importing it back is that it
      # works again!
      self.op.force_variant = True

      if self.op.no_install:
        self.LogInfo("No-installation mode has no effect during import")

    elif self.op.mode == constants.INSTANCE_CREATE:
      if self.op.os_type is None:
        raise errors.OpPrereqError("No guest OS specified",
                                   errors.ECODE_INVAL)
      if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
        raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
                                   " installation" % self.op.os_type,
                                   errors.ECODE_STATE)
    elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
      self._cds = GetClusterDomainSecret()

      # Check handshake to ensure both clusters have the same domain secret
      src_handshake = self.op.source_handshake
      if not src_handshake:
        raise errors.OpPrereqError("Missing source handshake",
                                   errors.ECODE_INVAL)

      errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
                                                           src_handshake)
      if errmsg:
        raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
                                   errors.ECODE_INVAL)

      # Load and check source CA
      self.source_x509_ca_pem = self.op.source_x509_ca
      if not self.source_x509_ca_pem:
        raise errors.OpPrereqError("Missing source X509 CA",
                                   errors.ECODE_INVAL)

      try:
        (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
                                                    self._cds)
      except OpenSSL.crypto.Error, err:
        raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
                                   (err, ), errors.ECODE_INVAL)

      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
      if errcode is not None:
        raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
                                   errors.ECODE_INVAL)

      self.source_x509_ca = cert

      src_instance_name = self.op.source_instance_name
      if not src_instance_name:
        raise errors.OpPrereqError("Missing source instance name",
                                   errors.ECODE_INVAL)

      self.source_instance_name = \
        netutils.GetHostname(name=src_instance_name).name

    else:
      raise errors.OpPrereqError("Invalid instance creation mode %r" %
                                 self.op.mode, errors.ECODE_INVAL)

  def ExpandNames(self):
    """ExpandNames for CreateInstance.

    Figure out the right locks for instance creation.

    """
    self.needed_locks = {}

    # this is just a preventive check, but someone might still add this
    # instance in the meantime, and creation will fail at lock-add time
    if self.op.instance_name in\
      [inst.name for inst in self.cfg.GetAllInstancesInfo().values()]:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 self.op.instance_name, errors.ECODE_EXISTS)

    self.add_locks[locking.LEVEL_INSTANCE] = self.op.instance_name

    if self.op.iallocator:
      # TODO: Find a solution to not lock all nodes in the cluster, e.g. by
      # specifying a group on instance creation and then selecting nodes from
      # that group
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
      self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET

      if self.op.opportunistic_locking:
        self.opportunistic_locks[locking.LEVEL_NODE] = True
    else:
      (self.op.pnode_uuid, self.op.pnode) = \
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
      nodelist = [self.op.pnode_uuid]
      if self.op.snode is not None:
        (self.op.snode_uuid, self.op.snode) = \
          ExpandNodeUuidAndName(self.cfg, self.op.snode_uuid, self.op.snode)
        nodelist.append(self.op.snode_uuid)
      self.needed_locks[locking.LEVEL_NODE] = nodelist

    # in case of import lock the source node too
    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = self.op.src_node
      src_path = self.op.src_path

      if src_path is None:
        self.op.src_path = src_path = self.op.instance_name

      if src_node is None:
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
        self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
        self.op.src_node = None
        if os.path.isabs(src_path):
          raise errors.OpPrereqError("Importing an instance from a path"
                                     " requires a source node option",
                                     errors.ECODE_INVAL)
      else:
        (self.op.src_node_uuid, self.op.src_node) = (_, src_node) = \
          ExpandNodeUuidAndName(self.cfg, self.op.src_node_uuid, src_node)
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
          self.needed_locks[locking.LEVEL_NODE].append(self.op.src_node_uuid)
        if not os.path.isabs(src_path):
          self.op.src_path = \
            utils.PathJoin(pathutils.EXPORT_DIR, src_path)

    self.needed_locks[locking.LEVEL_NODE_RES] = \
      CopyLockList(self.needed_locks[locking.LEVEL_NODE])

    # Optimistically acquire shared group locks (we're reading the
    # configuration).  We can't just call GetInstanceNodeGroups, because the
    # instance doesn't exist yet. Therefore we lock all node groups of all
    # nodes we have.
    if self.needed_locks[locking.LEVEL_NODE] == locking.ALL_SET:
      # In the case we lock all nodes for opportunistic allocation, we have no
      # choice than to lock all groups, because they're allocated before nodes.
      # This is sad, but true. At least we release all those we don't need in
      # CheckPrereq later.
      self.needed_locks[locking.LEVEL_NODEGROUP] = locking.ALL_SET
    else:
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
        list(self.cfg.GetNodeGroupsFromNodes(
          self.needed_locks[locking.LEVEL_NODE]))
    self.share_locks[locking.LEVEL_NODEGROUP] = 1

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE_RES and \
      self.opportunistic_locks[locking.LEVEL_NODE]:
      # Even when using opportunistic locking, we require the same set of
      # NODE_RES locks as we got NODE locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        self.owned_locks(locking.LEVEL_NODE)

  def _RunAllocator(self):
    """Run the allocator based on input opcode.

    """
    if self.op.opportunistic_locking:
      # Only consider nodes for which a lock is held
      node_name_whitelist = self.cfg.GetNodeNames(
        self.owned_locks(locking.LEVEL_NODE))
    else:
      node_name_whitelist = None

    req = _CreateInstanceAllocRequest(self.op, self.disks,
                                      self.nics, self.be_full,
                                      node_name_whitelist)
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      # When opportunistic locks are used only a temporary failure is generated
      if self.op.opportunistic_locking:
        ecode = errors.ECODE_TEMP_NORES
      else:
        ecode = errors.ECODE_NORES

      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" %
                                 (self.op.iallocator, ial.info),
                                 ecode)

    (self.op.pnode_uuid, self.op.pnode) = \
      ExpandNodeUuidAndName(self.cfg, None, ial.result[0])
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                 self.op.instance_name, self.op.iallocator,
                 utils.CommaJoin(ial.result))

    assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator"

    if req.RequiredNodes() == 2:
      (self.op.snode_uuid, self.op.snode) = \
        ExpandNodeUuidAndName(self.cfg, None, ial.result[1])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "ADD_MODE": self.op.mode,
      }
    if self.op.mode == constants.INSTANCE_IMPORT:
      env["SRC_NODE"] = self.op.src_node
      env["SRC_PATH"] = self.op.src_path
      env["SRC_IMAGES"] = self.src_images

    env.update(BuildInstanceHookEnv(
      name=self.op.instance_name,
      primary_node_name=self.op.pnode,
      secondary_node_names=self.cfg.GetNodeNames(self.secondaries),
      status=self.op.start,
      os_type=self.op.os_type,
      minmem=self.be_full[constants.BE_MINMEM],
      maxmem=self.be_full[constants.BE_MAXMEM],
      vcpus=self.be_full[constants.BE_VCPUS],
      nics=NICListToTuple(self, self.nics),
      disk_template=self.op.disk_template,
      disks=[(d[constants.IDISK_NAME], d.get("uuid", ""),
              d[constants.IDISK_SIZE], d[constants.IDISK_MODE])
             for d in self.disks],
      bep=self.be_full,
      hvp=self.hv_full,
      hypervisor_name=self.op.hypervisor,
      tags=self.op.tags,
      ))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode(), self.op.pnode_uuid] + self.secondaries
    return nl, nl

  def _ReadExportInfo(self):
    """Reads the export information from disk.

    It will override the opcode source node and path with the actual
    information, if these two were not specified before.

    @return: the export information

    """
    assert self.op.mode == constants.INSTANCE_IMPORT

    if self.op.src_node_uuid is None:
      locked_nodes = self.owned_locks(locking.LEVEL_NODE)
      exp_list = self.rpc.call_export_list(locked_nodes)
      found = False
      for node_uuid in exp_list:
        if exp_list[node_uuid].fail_msg:
          continue
        if self.op.src_path in exp_list[node_uuid].payload:
          found = True
          self.op.src_node = self.cfg.GetNodeInfo(node_uuid).name
          self.op.src_node_uuid = node_uuid
          self.op.src_path = utils.PathJoin(pathutils.EXPORT_DIR,
                                            self.op.src_path)
          break
      if not found:
        raise errors.OpPrereqError("No export found for relative path %s" %
                                   self.op.src_path, errors.ECODE_INVAL)

    CheckNodeOnline(self, self.op.src_node_uuid)
    result = self.rpc.call_export_info(self.op.src_node_uuid, self.op.src_path)
    result.Raise("No export or invalid export found in dir %s" %
                 self.op.src_path)

    export_info = objects.SerializableConfigParser.Loads(str(result.payload))
    if not export_info.has_section(constants.INISECT_EXP):
      raise errors.ProgrammerError("Corrupted export config",
                                   errors.ECODE_ENVIRON)

    ei_version = export_info.get(constants.INISECT_EXP, "version")
    if int(ei_version) != constants.EXPORT_VERSION:
      raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                 (ei_version, constants.EXPORT_VERSION),
                                 errors.ECODE_ENVIRON)
    return export_info

  def _ReadExportParams(self, einfo):
    """Use export parameters as defaults.

    In case the opcode doesn't specify (as in override) some instance
    parameters, then try to use them from the export information, if
    that declares them.

    """
    self.op.os_type = einfo.get(constants.INISECT_EXP, "os")

    if not self.op.disks:
      disks = []
      # TODO: import the disk iv_name too
      for idx in range(constants.MAX_DISKS):
        if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
          disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
          disk_name = einfo.get(constants.INISECT_INS, "disk%d_name" % idx)
          disk = {
            constants.IDISK_SIZE: disk_sz,
            constants.IDISK_NAME: disk_name
            }
          disks.append(disk)
      self.op.disks = disks
      if not disks and self.op.disk_template != constants.DT_DISKLESS:
        raise errors.OpPrereqError("No disk info specified and the export"
                                   " is missing the disk information",
                                   errors.ECODE_INVAL)

    if not self.op.nics:
      nics = []
      for idx in range(constants.MAX_NICS):
        if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
          ndict = {}
          for name in [constants.INIC_IP,
                       constants.INIC_MAC, constants.INIC_NAME]:
            nic_param_name = "nic%d_%s" % (idx, name)
            if einfo.has_option(constants.INISECT_INS, nic_param_name):
              v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
              ndict[name] = v
          network = einfo.get(constants.INISECT_INS,
                              "nic%d_%s" % (idx, constants.INIC_NETWORK))
          # in case network is given link and mode are inherited
          # from nodegroup's netparams and thus should not be passed here
          if network:
            ndict[constants.INIC_NETWORK] = network
          else:
            for name in list(constants.NICS_PARAMETERS):
              v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
              ndict[name] = v
          nics.append(ndict)
        else:
          break
      self.op.nics = nics

    if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
      self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()

    if (self.op.hypervisor is None and
        einfo.has_option(constants.INISECT_INS, "hypervisor")):
      self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")

    if einfo.has_section(constants.INISECT_HYP):
      # use the export parameters but do not override the ones
      # specified by the user
      for name, value in einfo.items(constants.INISECT_HYP):
        if name not in self.op.hvparams:
          self.op.hvparams[name] = value

    if einfo.has_section(constants.INISECT_BEP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_BEP):
        if name not in self.op.beparams:
          self.op.beparams[name] = value
        # Compatibility for the old "memory" be param
        if name == constants.BE_MEMORY:
          if constants.BE_MAXMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MAXMEM] = value
          if constants.BE_MINMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MINMEM] = value
    else:
      # try to read the parameters old style, from the main section
      for name in constants.BES_PARAMETERS:
        if (name not in self.op.beparams and
            einfo.has_option(constants.INISECT_INS, name)):
          self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)

    if einfo.has_section(constants.INISECT_OSP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_OSP):
        if name not in self.op.osparams:
          self.op.osparams[name] = value

    if einfo.has_section(constants.INISECT_OSP_PRIVATE):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_OSP_PRIVATE):
        if name not in self.op.osparams_private:
          self.op.osparams_private[name] = serializer.Private(value, descr=name)

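  # Note (editor's summary): called from CheckPrereq when
  # self.op.identify_defaults is set; any hv/be/NIC/OS parameter whose
  # requested value already equals the cluster default is dropped from the
  # opcode, so the new instance does not store redundant per-instance
  # overrides.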
  def _RevertToDefaults(self, cluster):
    """Revert the instance parameters to the default values.

    """
    # hvparams
    hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
    for name in self.op.hvparams.keys():
      if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
        del self.op.hvparams[name]
    # beparams
    be_defs = cluster.SimpleFillBE({})
    for name in self.op.beparams.keys():
      if name in be_defs and be_defs[name] == self.op.beparams[name]:
        del self.op.beparams[name]
    # nic params
    nic_defs = cluster.SimpleFillNIC({})
    for nic in self.op.nics:
      for name in constants.NICS_PARAMETERS:
        if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
          del nic[name]
    # osparams
    os_defs = cluster.SimpleFillOS(self.op.os_type, {})
    for name in self.op.osparams.keys():
      if name in os_defs and os_defs[name] == self.op.osparams[name]:
        del self.op.osparams[name]

    os_defs_ = cluster.SimpleFillOS(self.op.os_type, {},
                                    os_params_private={})
    for name in self.op.osparams_private.keys():
      if name in os_defs_ and os_defs_[name] == self.op.osparams_private[name]:
        del self.op.osparams_private[name]

  def _CalculateFileStorageDir(self):
    """Calculate final instance file storage dir.

    """
    # file storage dir calculation/check
    self.instance_file_storage_dir = None
    if self.op.disk_template in constants.DTS_FILEBASED:
      # build the full file storage dir path
      joinargs = []

      cfg_storage = None
      if self.op.disk_template == constants.DT_FILE:
        cfg_storage = self.cfg.GetFileStorageDir()
      elif self.op.disk_template == constants.DT_SHARED_FILE:
        cfg_storage = self.cfg.GetSharedFileStorageDir()
      elif self.op.disk_template == constants.DT_GLUSTER:
        cfg_storage = self.cfg.GetGlusterStorageDir()

      if not cfg_storage:
        raise errors.OpPrereqError(
          "Cluster file storage dir for {tpl} storage type not defined".format(
            tpl=repr(self.op.disk_template)
          ),
          errors.ECODE_STATE
      )

      joinargs.append(cfg_storage)

      if self.op.file_storage_dir is not None:
        joinargs.append(self.op.file_storage_dir)

      if self.op.disk_template != constants.DT_GLUSTER:
        joinargs.append(self.op.instance_name)

      if len(joinargs) > 1:
        # pylint: disable=W0142
        self.instance_file_storage_dir = utils.PathJoin(*joinargs)
      else:
        self.instance_file_storage_dir = joinargs[0]

  def CheckPrereq(self): # pylint: disable=R0914
    """Check prerequisites.

    """
    # Check that the optimistically acquired groups are correct wrt the
    # acquired nodes
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
    cur_groups = list(self.cfg.GetNodeGroupsFromNodes(owned_nodes))
    if not owned_groups.issuperset(cur_groups):
      raise errors.OpPrereqError("New instance %s's node groups changed since"
                                 " locks were acquired, current groups are"
                                 " '%s', owning groups '%s'; retry the"
                                 " operation" %
                                 (self.op.instance_name,
                                  utils.CommaJoin(cur_groups),
                                  utils.CommaJoin(owned_groups)),
                                 errors.ECODE_STATE)

    self._CalculateFileStorageDir()

    if self.op.mode == constants.INSTANCE_IMPORT:
      export_info = self._ReadExportInfo()
      self._ReadExportParams(export_info)
      self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
    else:
      self._old_instance_name = None

    if (not self.cfg.GetVGName() and
        self.op.disk_template not in constants.DTS_NOT_LVM):
      raise errors.OpPrereqError("Cluster does not support lvm-based"
                                 " instances", errors.ECODE_STATE)

    if (self.op.hypervisor is None or
        self.op.hypervisor == constants.VALUE_AUTO):
      self.op.hypervisor = self.cfg.GetHypervisorType()

    cluster = self.cfg.GetClusterInfo()
    enabled_hvs = cluster.enabled_hypervisors
    if self.op.hypervisor not in enabled_hvs:
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
                                 " cluster (%s)" %
                                 (self.op.hypervisor, ",".join(enabled_hvs)),
                                 errors.ECODE_STATE)

    # Check tag validity
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)

    # check hypervisor parameter syntax (locally)
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
    filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
                                      self.op.hvparams)
    hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
    hv_type.CheckParameterSyntax(filled_hvp)
    self.hv_full = filled_hvp
    # check that we don't specify global parameters on an instance
    CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
                         "instance", "cluster")

    # fill and remember the beparams dict
    self.be_full = _ComputeFullBeParams(self.op, cluster)

    # build os parameters
    if self.op.osparams_private is None:
      self.op.osparams_private = serializer.PrivateDict()
    if self.op.osparams_secret is None:
      self.op.osparams_secret = serializer.PrivateDict()

    self.os_full = cluster.SimpleFillOS(
      self.op.os_type,
      self.op.osparams,
      os_params_private=self.op.osparams_private,
      os_params_secret=self.op.osparams_secret
    )

    # now that hvp/bep are in final format, let's reset to defaults,
    # if told to do so
    if self.op.identify_defaults:
      self._RevertToDefaults(cluster)

    # NIC buildup
    self.nics = _ComputeNics(self.op, cluster, self.check_ip, self.cfg,
                             self.proc.GetECId())

    # disk checks/pre-build
    default_vg = self.cfg.GetVGName()
    self.disks = ComputeDisks(self.op, default_vg)

    if self.op.mode == constants.INSTANCE_IMPORT:
      disk_images = []
      for idx in range(len(self.disks)):
        option = "disk%d_dump" % idx
        if export_info.has_option(constants.INISECT_INS, option):
          # FIXME: are the old os-es, disk sizes, etc. useful?
          export_name = export_info.get(constants.INISECT_INS, option)
          image = utils.PathJoin(self.op.src_path, export_name)
          disk_images.append(image)
        else:
          disk_images.append(False)

      self.src_images = disk_images

      if self.op.instance_name == self._old_instance_name:
        for idx, nic in enumerate(self.nics):
          if nic.mac == constants.VALUE_AUTO:
            nic_mac_ini = "nic%d_mac" % idx
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)

    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT

    # ip ping checks (we use the same ip that was resolved in ExpandNames)
    if self.op.ip_check:
      if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (self.check_ip, self.op.instance_name),
                                   errors.ECODE_NOTUNIQUE)

    #### mac address generation
    # By generating here the mac address both the allocator and the hooks get
    # the real final mac address rather than the 'auto' or 'generate' value.
    # There is a race condition between the generation and the instance object
    # creation, which means that we know the mac is valid now, but we're not
    # sure it will be when we actually add the instance. If things go bad
    # adding the instance will abort because of a duplicate mac, and the
    # creation job will fail.
    for nic in self.nics:
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId())

    #### allocator run

    if self.op.iallocator is not None:
      self._RunAllocator()

    # Release all unneeded node locks
    keep_locks = filter(None, [self.op.pnode_uuid, self.op.snode_uuid,
                               self.op.src_node_uuid])
    ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)
    # Release all unneeded group locks
    ReleaseLocks(self, locking.LEVEL_NODEGROUP,
                 keep=self.cfg.GetNodeGroupsFromNodes(keep_locks))

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES)), \
      "Node locks differ from node resource locks"

    #### node related checks

    # check primary node
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode_uuid)
    assert self.pnode is not None, \
      "Cannot retrieve locked node %s" % self.op.pnode_uuid
    if pnode.offline:
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if pnode.drained:
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if not pnode.vm_capable:
      raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
                                 " '%s'" % pnode.name, errors.ECODE_STATE)

    self.secondaries = []

    # Fill in any IPs from IP pools. This must happen here, because we need to
    # know the nic's primary node, as specified by the iallocator
    for idx, nic in enumerate(self.nics):
      net_uuid = nic.network
      if net_uuid is not None:
        nobj = self.cfg.GetNetwork(net_uuid)
        netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.uuid)
        if netparams is None:
          raise errors.OpPrereqError("No netparams found for network"
                                     " %s. Probably not connected to"
                                     " node's %s nodegroup" %
                                     (nobj.name, self.pnode.name),
                                     errors.ECODE_INVAL)
        self.LogInfo("NIC/%d inherits netparams %s" %
                     (idx, netparams.values()))
        nic.nicparams = dict(netparams)
        if nic.ip is not None:
          if nic.ip.lower() == constants.NIC_IP_POOL:
            try:
              nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
                                         " from the address pool" % idx,
                                         errors.ECODE_STATE)
            self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name)
          else:
            try:
              self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId(),
                                 check=self.op.conflicts_check)
            except errors.ReservationError:
              raise errors.OpPrereqError("IP address %s already in use"
                                         " or does not belong to network %s" %
                                         (nic.ip, nobj.name),
                                         errors.ECODE_NOTUNIQUE)

      # net is None, ip None or given
      elif self.op.conflicts_check:
        _CheckForConflictingIp(self, nic.ip, self.pnode.uuid)

    # mirror node verification
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.snode_uuid == pnode.uuid:
        raise errors.OpPrereqError("The secondary node cannot be the"
                                   " primary node", errors.ECODE_INVAL)
      CheckNodeOnline(self, self.op.snode_uuid)
      CheckNodeNotDrained(self, self.op.snode_uuid)
      CheckNodeVmCapable(self, self.op.snode_uuid)
      self.secondaries.append(self.op.snode_uuid)

      snode = self.cfg.GetNodeInfo(self.op.snode_uuid)
      if pnode.group != snode.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    nodes = [pnode]
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      nodes.append(snode)
    has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
    excl_stor = compat.any(map(has_es, nodes))
    if excl_stor and not self.op.disk_template in constants.DTS_EXCL_STORAGE:
      raise errors.OpPrereqError("Disk template %s not supported with"
                                 " exclusive storage" % self.op.disk_template,
                                 errors.ECODE_STATE)
    for disk in self.disks:
      CheckSpindlesExclusiveStorage(disk, excl_stor, True)

    node_uuids = [pnode.uuid] + self.secondaries

    if not self.adopt_disks:
      if self.op.disk_template == constants.DT_RBD:
        # _CheckRADOSFreeSpace() is just a placeholder.
        # Any function that checks prerequisites can be placed here.
        # Check if there is enough space on the RADOS cluster.
        CheckRADOSFreeSpace()
      elif self.op.disk_template == constants.DT_EXT:
        # FIXME: Function that checks prereqs if needed
        pass
      elif self.op.disk_template in constants.DTS_LVM:
        # Check lv size requirements, if not adopting
        req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
        CheckNodesFreeDiskPerVG(self, node_uuids, req_sizes)
      else:
        # FIXME: add checks for other, non-adopting, non-lvm disk templates
        pass

    elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
      all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
                                disk[constants.IDISK_ADOPT])
                     for disk in self.disks])
      if len(all_lvs) != len(self.disks):
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
                                   errors.ECODE_INVAL)
      for lv_name in all_lvs:
        try:
          # FIXME: lv_name here is "vg/lv" need to ensure that other calls
          # to ReserveLV uses the same syntax
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("LV named %s used by another instance" %
                                     lv_name, errors.ECODE_NOTUNIQUE)

      vg_names = self.rpc.call_vg_list([pnode.uuid])[pnode.uuid]
      vg_names.Raise("Cannot get VG information from node %s" % pnode.name)

      node_lvs = self.rpc.call_lv_list([pnode.uuid],
                                       vg_names.payload.keys())[pnode.uuid]
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
      node_lvs = node_lvs.payload

      delta = all_lvs.difference(node_lvs.keys())
      if delta:
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
      if online_lvs:
        raise errors.OpPrereqError("Online logical volumes found, cannot"
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
                                   errors.ECODE_STATE)
      # update the size of disk based on what is found
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
                                        dsk[constants.IDISK_ADOPT])][0]))

1234
    elif self.op.disk_template == constants.DT_BLOCK:
1235
      # Normalize and de-duplicate device paths
1236
      all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
1237
                       for disk in self.disks])
1238
      if len(all_disks) != len(self.disks):
1239
        raise errors.OpPrereqError("Duplicate disk names given for adoption",
1240
                                   errors.ECODE_INVAL)
1241
      baddisks = [d for d in all_disks
1242
                  if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
1243
      if baddisks:
1244
        raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
1245
                                   " cannot be adopted" %
1246
                                   (utils.CommaJoin(baddisks),
1247
                                    constants.ADOPTABLE_BLOCKDEV_ROOT),
1248
                                   errors.ECODE_INVAL)
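      # Hedged example (paths are hypothetical): with
      # constants.ADOPTABLE_BLOCKDEV_ROOT pointing at a directory such as
      # "/dev/disk/", a spec like {IDISK_ADOPT: "/dev/disk/by-id/wwn-0x5000"}
      # passes this check, while something like "/dev/sdb1" would be rejected
      # as lying outside the adoptable root.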
1249

    
1250
      node_disks = self.rpc.call_bdev_sizes([pnode.uuid],
1251
                                            list(all_disks))[pnode.uuid]
1252
      node_disks.Raise("Cannot get block device information from node %s" %
1253
                       pnode.name)
1254
      node_disks = node_disks.payload
1255
      delta = all_disks.difference(node_disks.keys())
1256
      if delta:
1257
        raise errors.OpPrereqError("Missing block device(s): %s" %
1258
                                   utils.CommaJoin(delta),
1259
                                   errors.ECODE_INVAL)
1260
      for dsk in self.disks:
1261
        dsk[constants.IDISK_SIZE] = \
1262
          int(float(node_disks[dsk[constants.IDISK_ADOPT]]))
1263

    
1264
    # Check disk access param to be compatible with specified hypervisor
1265
    node_info = self.cfg.GetNodeInfo(self.op.pnode_uuid)
1266
    node_group = self.cfg.GetNodeGroup(node_info.group)
1267
    disk_params = self.cfg.GetGroupDiskParams(node_group)
1268
    access_type = disk_params[self.op.disk_template].get(
1269
      constants.RBD_ACCESS, constants.DISK_KERNELSPACE
1270
    )
1271

    
1272
    if not IsValidDiskAccessModeCombination(self.op.hypervisor,
1273
                                            self.op.disk_template,
1274
                                            access_type):
1275
      raise errors.OpPrereqError("Selected hypervisor (%s) cannot be"
1276
                                 " used with %s disk access param" %
1277
                                 (self.op.hypervisor, access_type),
1278
                                 errors.ECODE_STATE)
1279

    
1280
    # Verify instance specs
1281
    spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
1282
    ispec = {
1283
      constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
1284
      constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
1285
      constants.ISPEC_DISK_COUNT: len(self.disks),
1286
      constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE]
1287
                                  for disk in self.disks],
1288
      constants.ISPEC_NIC_COUNT: len(self.nics),
1289
      constants.ISPEC_SPINDLE_USE: spindle_use,
1290
      }
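    # For illustration only (hypothetical values): an instance with 2 GiB of
    # maximum memory, 2 VCPUs and two 10 GiB disks would yield roughly
    # {ISPEC_MEM_SIZE: 2048, ISPEC_CPU_COUNT: 2, ISPEC_DISK_COUNT: 2,
    #  ISPEC_DISK_SIZE: [10240, 10240], ISPEC_NIC_COUNT: 1,
    #  ISPEC_SPINDLE_USE: 1}, which is then matched against the group ipolicy.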
1291

    
1292
    group_info = self.cfg.GetNodeGroup(pnode.group)
1293
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
1294
    res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
1295
                                               self.op.disk_template)
1296
    if not self.op.ignore_ipolicy and res:
1297
      msg = ("Instance allocation to group %s (%s) violates policy: %s" %
1298
             (pnode.group, group_info.name, utils.CommaJoin(res)))
1299
      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
1300

    
1301
    CheckHVParams(self, node_uuids, self.op.hypervisor, self.op.hvparams)
1302

    
1303
    CheckNodeHasOS(self, pnode.uuid, self.op.os_type, self.op.force_variant)
1304
    # check OS parameters (remotely)
1305
    CheckOSParams(self, True, node_uuids, self.op.os_type, self.os_full)
1306

    
1307
    CheckNicsBridgesExist(self, self.nics, self.pnode.uuid)
1308

    
1309
    #TODO: _CheckExtParams (remotely)
1310
    # Check parameters for extstorage
1311

    
1312
    # memory check on primary node
1313
    #TODO(dynmem): use MINMEM for checking
1314
    if self.op.start:
1315
      hvfull = objects.FillDict(cluster.hvparams.get(self.op.hypervisor, {}),
1316
                                self.op.hvparams)
1317
      CheckNodeFreeMemory(self, self.pnode.uuid,
1318
                          "creating instance %s" % self.op.instance_name,
1319
                          self.be_full[constants.BE_MAXMEM],
1320
                          self.op.hypervisor, hvfull)
1321

    
1322
    self.dry_run_result = list(node_uuids)
1323

    
1324
  def _RemoveDegradedDisks(self, feedback_fn, disk_abort, instance):
1325
    """Removes degraded disks and instance.
1326

1327
    It optionally checks whether disks are degraded.  If the disks are
1328
    degraded, they are removed and the instance is also removed from
1329
    the configuration.
1330

1331
    If L{disk_abort} is True, then the disks are considered degraded
1332
    and removed, and the instance is removed from the configuration.
1333

1334
    If L{disk_abort} is False, then it first checks whether disks are
1335
    degraded and, if so, it removes the disks and the instance is
1336
    removed from the configuration.
1337

1338
    @type feedback_fn: callable
1339
    @param feedback_fn: function used to send feedback back to the caller
1340

1341
    @type disk_abort: boolean
1342
    @param disk_abort:
1343
      True if disks are degraded, False to first check if disks are
1344
      degraded
1345

1346
    @type instance: L{objects.Instance}
1347
    @param instance: instance containing the disks to check
1348

1349
    @rtype: NoneType
1350
    @return: None
1351
    @raise errors.OpExecError: if disks are degraded
1352

1353
    """
1354
    if disk_abort:
1355
      pass
1356
    elif self.op.wait_for_sync:
1357
      disk_abort = not WaitForSync(self, instance)
1358
    elif instance.disk_template in constants.DTS_INT_MIRROR:
1359
      # make sure the disks are not degraded (still sync-ing is ok)
1360
      feedback_fn("* checking mirrors status")
1361
      disk_abort = not WaitForSync(self, instance, oneshot=True)
1362
    else:
1363
      disk_abort = False
1364

    
1365
    if disk_abort:
1366
      RemoveDisks(self, instance)
1367
      self.cfg.RemoveInstance(instance.uuid)
1368
      # Make sure the instance lock gets removed
1369
      self.remove_locks[locking.LEVEL_INSTANCE] = instance.name
1370
      raise errors.OpExecError("There are some degraded disks for"
1371
                               " this instance")
1372

    
1373
  def Exec(self, feedback_fn):
1374
    """Create and add the instance to the cluster.
1375

1376
    """
1377
    assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
1378
                self.owned_locks(locking.LEVEL_NODE)), \
1379
      "Node locks differ from node resource locks"
1380
    assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)
1381

    
1382
    ht_kind = self.op.hypervisor
1383
    if ht_kind in constants.HTS_REQ_PORT:
1384
      network_port = self.cfg.AllocatePort()
1385
    else:
1386
      network_port = None
1387

    
1388
    instance_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
1389

    
1390
    # This is ugly but we got a chicken-egg problem here
1391
    # We can only take the group disk parameters, as the instance
1392
    # has no disks yet (we are generating them right here).
1393
    nodegroup = self.cfg.GetNodeGroup(self.pnode.group)
1394
    disks = GenerateDiskTemplate(self,
1395
                                 self.op.disk_template,
1396
                                 instance_uuid, self.pnode.uuid,
1397
                                 self.secondaries,
1398
                                 self.disks,
1399
                                 self.instance_file_storage_dir,
1400
                                 self.op.file_driver,
1401
                                 0,
1402
                                 feedback_fn,
1403
                                 self.cfg.GetGroupDiskParams(nodegroup))
1404

    
1405
    iobj = objects.Instance(name=self.op.instance_name,
1406
                            uuid=instance_uuid,
1407
                            os=self.op.os_type,
1408
                            primary_node=self.pnode.uuid,
1409
                            nics=self.nics, disks=disks,
1410
                            disk_template=self.op.disk_template,
1411
                            disks_active=False,
1412
                            admin_state=constants.ADMINST_DOWN,
1413
                            network_port=network_port,
1414
                            beparams=self.op.beparams,
1415
                            hvparams=self.op.hvparams,
1416
                            hypervisor=self.op.hypervisor,
1417
                            osparams=self.op.osparams,
1418
                            osparams_private=self.op.osparams_private,
1419
                            )
1420

    
1421
    if self.op.tags:
1422
      for tag in self.op.tags:
1423
        iobj.AddTag(tag)
1424

    
1425
    if self.adopt_disks:
1426
      if self.op.disk_template == constants.DT_PLAIN:
1427
        # rename LVs to the newly-generated names; we need to construct
1428
        # 'fake' LV disks with the old data, plus the new unique_id
1429
        tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
1430
        rename_to = []
1431
        for t_dsk, a_dsk in zip(tmp_disks, self.disks):
1432
          rename_to.append(t_dsk.logical_id)
1433
          t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
1434
        result = self.rpc.call_blockdev_rename(self.pnode.uuid,
1435
                                               zip(tmp_disks, rename_to))
1436
        result.Raise("Failed to rename adoped LVs")
1437
    else:
1438
      feedback_fn("* creating instance disks...")
1439
      try:
1440
        CreateDisks(self, iobj)
1441
      except errors.OpExecError:
1442
        self.LogWarning("Device creation failed")
1443
        self.cfg.ReleaseDRBDMinors(self.op.instance_name)
1444
        raise
1445

    
1446
    feedback_fn("adding instance %s to cluster config" % self.op.instance_name)
1447

    
1448
    self.cfg.AddInstance(iobj, self.proc.GetECId())
1449

    
1450
    # Declare that we don't want to remove the instance lock anymore, as we've
1451
    # added the instance to the config
1452
    del self.remove_locks[locking.LEVEL_INSTANCE]
1453

    
1454
    if self.op.mode == constants.INSTANCE_IMPORT:
1455
      # Release unused nodes
1456
      ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node_uuid])
1457
    else:
1458
      # Release all nodes
1459
      ReleaseLocks(self, locking.LEVEL_NODE)
1460

    
1461
    # Wipe disks
1462
    disk_abort = False
1463
    if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
1464
      feedback_fn("* wiping instance disks...")
1465
      try:
1466
        WipeDisks(self, iobj)
1467
      except errors.OpExecError, err:
1468
        logging.exception("Wiping disks failed")
1469
        self.LogWarning("Wiping instance disks failed (%s)", err)
1470
        disk_abort = True
1471

    
1472
    self._RemoveDegradedDisks(feedback_fn, disk_abort, iobj)
1473

    
1474

    
1475
    # instance disks are now active
1476
    iobj.disks_active = True
1477

    
1478
    # Release all node resource locks
1479
    ReleaseLocks(self, locking.LEVEL_NODE_RES)
1480

    
1481
    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
1482
      if self.op.mode == constants.INSTANCE_CREATE:
1483
        if not self.op.no_install:
1484
          pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
1485
                        not self.op.wait_for_sync)
1486
          if pause_sync:
1487
            feedback_fn("* pausing disk sync to install instance OS")
1488
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
1489
                                                              (iobj.disks,
1490
                                                               iobj), True)
1491
            for idx, success in enumerate(result.payload):
1492
              if not success:
1493
                logging.warn("pause-sync of instance %s for disk %d failed",
1494
                             self.op.instance_name, idx)
1495

    
1496
          feedback_fn("* running the instance OS create scripts...")
1497
          # FIXME: pass debug option from opcode to backend
1498
          os_add_result = \
1499
            self.rpc.call_instance_os_add(self.pnode.uuid,
1500
                                          (iobj, self.op.osparams_secret),
1501
                                          False,
1502
                                          self.op.debug_level)
1503
          if pause_sync:
1504
            feedback_fn("* resuming disk sync")
1505
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
1506
                                                              (iobj.disks,
1507
                                                               iobj), False)
1508
            for idx, success in enumerate(result.payload):
1509
              if not success:
1510
                logging.warn("resume-sync of instance %s for disk %d failed",
1511
                             self.op.instance_name, idx)
1512

    
1513
          os_add_result.Raise("Could not add os for instance %s"
1514
                              " on node %s" % (self.op.instance_name,
1515
                                               self.pnode.name))
1516

    
1517
      else:
1518
        if self.op.mode == constants.INSTANCE_IMPORT:
1519
          feedback_fn("* running the instance OS import scripts...")
1520

    
1521
          transfers = []
1522

    
1523
          for idx, image in enumerate(self.src_images):
1524
            if not image:
1525
              continue
1526

    
1527
            # FIXME: pass debug option from opcode to backend
1528
            dt = masterd.instance.DiskTransfer("disk/%s" % idx,
1529
                                               constants.IEIO_FILE, (image, ),
1530
                                               constants.IEIO_SCRIPT,
1531
                                               ((iobj.disks[idx], iobj), idx),
1532
                                               None)
1533
            transfers.append(dt)
1534

    
1535
          import_result = \
1536
            masterd.instance.TransferInstanceData(self, feedback_fn,
1537
                                                  self.op.src_node_uuid,
1538
                                                  self.pnode.uuid,
1539
                                                  self.pnode.secondary_ip,
1540
                                                  self.op.compress,
1541
                                                  iobj, transfers)
1542
          if not compat.all(import_result):
1543
            self.LogWarning("Some disks for instance %s on node %s were not"
1544
                            " imported successfully" % (self.op.instance_name,
1545
                                                        self.pnode.name))
1546

    
1547
          rename_from = self._old_instance_name
1548

    
1549
        elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
1550
          feedback_fn("* preparing remote import...")
1551
          # The source cluster will stop the instance before attempting to make
1552
          # a connection. In some cases stopping an instance can take a long
1553
          # time, hence the shutdown timeout is added to the connection
1554
          # timeout.
1555
          connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
1556
                             self.op.source_shutdown_timeout)
1557
          timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
1558

    
1559
          assert iobj.primary_node == self.pnode.uuid
1560
          disk_results = \
1561
            masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
1562
                                          self.source_x509_ca,
1563
                                          self._cds, self.op.compress, timeouts)
1564
          if not compat.all(disk_results):
1565
            # TODO: Should the instance still be started, even if some disks
1566
            # failed to import (valid for local imports, too)?
1567
            self.LogWarning("Some disks for instance %s on node %s were not"
1568
                            " imported successfully" % (self.op.instance_name,
1569
                                                        self.pnode.name))
1570

    
1571
          rename_from = self.source_instance_name
1572

    
1573
        else:
1574
          # also checked in the prereq part
1575
          raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
1576
                                       % self.op.mode)
1577

    
1578
        # Run rename script on newly imported instance
1579
        assert iobj.name == self.op.instance_name
1580
        feedback_fn("Running rename script for %s" % self.op.instance_name)
1581
        result = self.rpc.call_instance_run_rename(self.pnode.uuid, iobj,
1582
                                                   rename_from,
1583
                                                   self.op.debug_level)
1584
        result.Warn("Failed to run rename script for %s on node %s" %
1585
                    (self.op.instance_name, self.pnode.name), self.LogWarning)
1586

    
1587
    assert not self.owned_locks(locking.LEVEL_NODE_RES)
1588

    
1589
    if self.op.start:
1590
      iobj.admin_state = constants.ADMINST_UP
1591
      self.cfg.Update(iobj, feedback_fn)
1592
      logging.info("Starting instance %s on node %s", self.op.instance_name,
1593
                   self.pnode.name)
1594
      feedback_fn("* starting instance...")
1595
      result = self.rpc.call_instance_start(self.pnode.uuid, (iobj, None, None),
1596
                                            False, self.op.reason)
1597
      result.Raise("Could not start instance")
1598

    
1599
    return self.cfg.GetNodeNames(list(iobj.all_nodes))
1600

    
1601

    
1602
class LUInstanceRename(LogicalUnit):
1603
  """Rename an instance.
1604

1605
  """
1606
  HPATH = "instance-rename"
1607
  HTYPE = constants.HTYPE_INSTANCE
1608

    
1609
  def CheckArguments(self):
1610
    """Check arguments.
1611

1612
    """
1613
    if self.op.ip_check and not self.op.name_check:
1614
      # TODO: make the ip check more flexible and not depend on the name check
1615
      raise errors.OpPrereqError("IP address check requires a name check",
1616
                                 errors.ECODE_INVAL)
1617

    
1618
  def BuildHooksEnv(self):
1619
    """Build hooks env.
1620

1621
    This runs on master, primary and secondary nodes of the instance.
1622

1623
    """
1624
    env = BuildInstanceHookEnvByObject(self, self.instance)
1625
    env["INSTANCE_NEW_NAME"] = self.op.new_name
1626
    return env
1627

    
1628
  def BuildHooksNodes(self):
1629
    """Build hooks nodes.
1630

1631
    """
1632
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
1633
    return (nl, nl)
1634

    
1635
  def CheckPrereq(self):
1636
    """Check prerequisites.
1637

1638
    This checks that the instance is in the cluster and is not running.
1639

1640
    """
1641
    (self.op.instance_uuid, self.op.instance_name) = \
1642
      ExpandInstanceUuidAndName(self.cfg, self.op.instance_uuid,
1643
                                self.op.instance_name)
1644
    instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1645
    assert instance is not None
1646

    
1647
    # It should actually not happen that an instance is running with a disabled
1648
    # disk template, but in case it does, the renaming of file-based instances
1649
    # will fail horribly. Thus, we test it before.
1650
    if (instance.disk_template in constants.DTS_FILEBASED and
1651
        self.op.new_name != instance.name):
1652
      CheckDiskTemplateEnabled(self.cfg.GetClusterInfo(),
1653
                               instance.disk_template)
1654

    
1655
    CheckNodeOnline(self, instance.primary_node)
1656
    CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
1657
                       msg="cannot rename")
1658
    self.instance = instance
1659

    
1660
    new_name = self.op.new_name
1661
    if self.op.name_check:
1662
      hostname = _CheckHostnameSane(self, new_name)
1663
      new_name = self.op.new_name = hostname.name
1664
      if (self.op.ip_check and
1665
          netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
1666
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
1667
                                   (hostname.ip, new_name),
1668
                                   errors.ECODE_NOTUNIQUE)
1669

    
1670
    instance_names = [inst.name for
1671
                      inst in self.cfg.GetAllInstancesInfo().values()]
1672
    if new_name in instance_names and new_name != instance.name:
1673
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
1674
                                 new_name, errors.ECODE_EXISTS)
1675

    
1676
  def Exec(self, feedback_fn):
1677
    """Rename the instance.
1678

1679
    """
1680
    old_name = self.instance.name
1681

    
1682
    rename_file_storage = False
1683
    if (self.instance.disk_template in (constants.DT_FILE,
1684
                                        constants.DT_SHARED_FILE) and
1685
        self.op.new_name != self.instance.name):
1686
      old_file_storage_dir = os.path.dirname(
1687
                               self.instance.disks[0].logical_id[1])
1688
      rename_file_storage = True
1689

    
1690
    self.cfg.RenameInstance(self.instance.uuid, self.op.new_name)
1691
    # Change the instance lock. This is definitely safe while we hold the BGL.
1692
    # Otherwise the new lock would have to be added in acquired mode.
1693
    assert self.REQ_BGL
1694
    assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER)
1695
    self.glm.remove(locking.LEVEL_INSTANCE, old_name)
1696
    self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
1697

    
1698
    # re-read the instance from the configuration after rename
1699
    renamed_inst = self.cfg.GetInstanceInfo(self.instance.uuid)
1700

    
1701
    if rename_file_storage:
1702
      new_file_storage_dir = os.path.dirname(
1703
                               renamed_inst.disks[0].logical_id[1])
1704
      result = self.rpc.call_file_storage_dir_rename(renamed_inst.primary_node,
1705
                                                     old_file_storage_dir,
1706
                                                     new_file_storage_dir)
1707
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
1708
                   " (but the instance has been renamed in Ganeti)" %
1709
                   (self.cfg.GetNodeName(renamed_inst.primary_node),
1710
                    old_file_storage_dir, new_file_storage_dir))
1711

    
1712
    StartInstanceDisks(self, renamed_inst, None)
1713
    # update info on disks
1714
    info = GetInstanceInfoText(renamed_inst)
1715
    for (idx, disk) in enumerate(renamed_inst.disks):
1716
      for node_uuid in renamed_inst.all_nodes:
1717
        result = self.rpc.call_blockdev_setinfo(node_uuid,
1718
                                                (disk, renamed_inst), info)
1719
        result.Warn("Error setting info on node %s for disk %s" %
1720
                    (self.cfg.GetNodeName(node_uuid), idx), self.LogWarning)
1721
    try:
1722
      result = self.rpc.call_instance_run_rename(renamed_inst.primary_node,
1723
                                                 renamed_inst, old_name,
1724
                                                 self.op.debug_level)
1725
      result.Warn("Could not run OS rename script for instance %s on node %s"
1726
                  " (but the instance has been renamed in Ganeti)" %
1727
                  (renamed_inst.name,
1728
                   self.cfg.GetNodeName(renamed_inst.primary_node)),
1729
                  self.LogWarning)
1730
    finally:
1731
      ShutdownInstanceDisks(self, renamed_inst)
1732

    
1733
    return renamed_inst.name
1734

    
1735

    
1736
class LUInstanceRemove(LogicalUnit):
1737
  """Remove an instance.
1738

1739
  """
1740
  HPATH = "instance-remove"
1741
  HTYPE = constants.HTYPE_INSTANCE
1742
  REQ_BGL = False
1743

    
1744
  def ExpandNames(self):
1745
    self._ExpandAndLockInstance()
1746
    self.needed_locks[locking.LEVEL_NODE] = []
1747
    self.needed_locks[locking.LEVEL_NODE_RES] = []
1748
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
1749

    
1750
  def DeclareLocks(self, level):
1751
    if level == locking.LEVEL_NODE:
1752
      self._LockInstancesNodes()
1753
    elif level == locking.LEVEL_NODE_RES:
1754
      # Copy node locks
1755
      self.needed_locks[locking.LEVEL_NODE_RES] = \
1756
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1757

    
1758
  def BuildHooksEnv(self):
1759
    """Build hooks env.
1760

1761
    This runs on master, primary and secondary nodes of the instance.
1762

1763
    """
1764
    env = BuildInstanceHookEnvByObject(self, self.instance)
1765
    env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
1766
    return env
1767

    
1768
  def BuildHooksNodes(self):
1769
    """Build hooks nodes.
1770

1771
    """
1772
    nl = [self.cfg.GetMasterNode()]
1773
    nl_post = list(self.instance.all_nodes) + nl
1774
    return (nl, nl_post)
1775

    
1776
  def CheckPrereq(self):
1777
    """Check prerequisites.
1778

1779
    This checks that the instance is in the cluster.
1780

1781
    """
1782
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1783
    assert self.instance is not None, \
1784
      "Cannot retrieve locked instance %s" % self.op.instance_name
1785

    
1786
  def Exec(self, feedback_fn):
1787
    """Remove the instance.
1788

1789
    """
1790
    logging.info("Shutting down instance %s on node %s", self.instance.name,
1791
                 self.cfg.GetNodeName(self.instance.primary_node))
1792

    
1793
    result = self.rpc.call_instance_shutdown(self.instance.primary_node,
1794
                                             self.instance,
1795
                                             self.op.shutdown_timeout,
1796
                                             self.op.reason)
1797
    if self.op.ignore_failures:
1798
      result.Warn("Warning: can't shutdown instance", feedback_fn)
1799
    else:
1800
      result.Raise("Could not shutdown instance %s on node %s" %
1801
                   (self.instance.name,
1802
                    self.cfg.GetNodeName(self.instance.primary_node)))
1803

    
1804
    assert (self.owned_locks(locking.LEVEL_NODE) ==
1805
            self.owned_locks(locking.LEVEL_NODE_RES))
1806
    assert not (set(self.instance.all_nodes) -
1807
                self.owned_locks(locking.LEVEL_NODE)), \
1808
      "Not owning correct locks"
1809

    
1810
    RemoveInstance(self, feedback_fn, self.instance, self.op.ignore_failures)
1811

    
1812

    
1813
class LUInstanceMove(LogicalUnit):
1814
  """Move an instance by data-copying.
1815

1816
  """
1817
  HPATH = "instance-move"
1818
  HTYPE = constants.HTYPE_INSTANCE
1819
  REQ_BGL = False
1820

    
1821
  def ExpandNames(self):
1822
    self._ExpandAndLockInstance()
1823
    (self.op.target_node_uuid, self.op.target_node) = \
1824
      ExpandNodeUuidAndName(self.cfg, self.op.target_node_uuid,
1825
                            self.op.target_node)
1826
    self.needed_locks[locking.LEVEL_NODE] = [self.op.target_node_uuid]
1827
    self.needed_locks[locking.LEVEL_NODE_RES] = []
1828
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
1829

    
1830
  def DeclareLocks(self, level):
1831
    if level == locking.LEVEL_NODE:
1832
      self._LockInstancesNodes(primary_only=True)
1833
    elif level == locking.LEVEL_NODE_RES:
1834
      # Copy node locks
1835
      self.needed_locks[locking.LEVEL_NODE_RES] = \
1836
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1837

    
1838
  def BuildHooksEnv(self):
1839
    """Build hooks env.
1840

1841
    This runs on master, primary and target nodes of the instance.
1842

1843
    """
1844
    env = {
1845
      "TARGET_NODE": self.op.target_node,
1846
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
1847
      }
1848
    env.update(BuildInstanceHookEnvByObject(self, self.instance))
1849
    return env
1850

    
1851
  def BuildHooksNodes(self):
1852
    """Build hooks nodes.
1853

1854
    """
1855
    nl = [
1856
      self.cfg.GetMasterNode(),
1857
      self.instance.primary_node,
1858
      self.op.target_node_uuid,
1859
      ]
1860
    return (nl, nl)
1861

    
1862
  def CheckPrereq(self):
1863
    """Check prerequisites.
1864

1865
    This checks that the instance is in the cluster.
1866

1867
    """
1868
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1869
    assert self.instance is not None, \
1870
      "Cannot retrieve locked instance %s" % self.op.instance_name
1871

    
1872
    if self.instance.disk_template not in constants.DTS_COPYABLE:
1873
      raise errors.OpPrereqError("Disk template %s not suitable for copying" %
1874
                                 self.instance.disk_template,
1875
                                 errors.ECODE_STATE)
1876

    
1877
    target_node = self.cfg.GetNodeInfo(self.op.target_node_uuid)
1878
    assert target_node is not None, \
1879
      "Cannot retrieve locked node %s" % self.op.target_node
1880

    
1881
    self.target_node_uuid = target_node.uuid
1882
    if target_node.uuid == self.instance.primary_node:
1883
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
1884
                                 (self.instance.name, target_node.name),
1885
                                 errors.ECODE_STATE)
1886

    
1887
    cluster = self.cfg.GetClusterInfo()
1888
    bep = cluster.FillBE(self.instance)
1889

    
1890
    for idx, dsk in enumerate(self.instance.disks):
1891
      if dsk.dev_type not in (constants.DT_PLAIN, constants.DT_FILE,
1892
                              constants.DT_SHARED_FILE, constants.DT_GLUSTER):
1893
        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
1894
                                   " cannot copy" % idx, errors.ECODE_STATE)
1895

    
1896
    CheckNodeOnline(self, target_node.uuid)
1897
    CheckNodeNotDrained(self, target_node.uuid)
1898
    CheckNodeVmCapable(self, target_node.uuid)
1899
    group_info = self.cfg.GetNodeGroup(target_node.group)
1900
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
1901
    CheckTargetNodeIPolicy(self, ipolicy, self.instance, target_node, self.cfg,
1902
                           ignore=self.op.ignore_ipolicy)
1903

    
1904
    if self.instance.admin_state == constants.ADMINST_UP:
1905
      # check memory requirements on the target node
1906
      CheckNodeFreeMemory(
1907
          self, target_node.uuid, "failing over instance %s" %
1908
          self.instance.name, bep[constants.BE_MAXMEM],
1909
          self.instance.hypervisor,
1910
          cluster.hvparams[self.instance.hypervisor])
1911
    else:
1912
      self.LogInfo("Not checking memory on the secondary node as"
1913
                   " instance will not be started")
1914

    
1915
    # check bridge existence
1916
    CheckInstanceBridgesExist(self, self.instance, node_uuid=target_node.uuid)
1917

    
1918
  def Exec(self, feedback_fn):
1919
    """Move an instance.
1920

1921
    The move is done by shutting it down on its present node, copying
1922
    the data over (slow) and starting it on the new node.
1923

1924
    """
1925
    source_node = self.cfg.GetNodeInfo(self.instance.primary_node)
1926
    target_node = self.cfg.GetNodeInfo(self.target_node_uuid)
1927

    
1928
    self.LogInfo("Shutting down instance %s on source node %s",
1929
                 self.instance.name, source_node.name)
1930

    
1931
    assert (self.owned_locks(locking.LEVEL_NODE) ==
1932
            self.owned_locks(locking.LEVEL_NODE_RES))
1933

    
1934
    result = self.rpc.call_instance_shutdown(source_node.uuid, self.instance,
1935
                                             self.op.shutdown_timeout,
1936
                                             self.op.reason)
1937
    if self.op.ignore_consistency:
1938
      result.Warn("Could not shutdown instance %s on node %s. Proceeding"
1939
                  " anyway. Please make sure node %s is down. Error details" %
1940
                  (self.instance.name, source_node.name, source_node.name),
1941
                  self.LogWarning)
1942
    else:
1943
      result.Raise("Could not shutdown instance %s on node %s" %
1944
                   (self.instance.name, source_node.name))
1945

    
1946
    # create the target disks
1947
    try:
1948
      CreateDisks(self, self.instance, target_node_uuid=target_node.uuid)
1949
    except errors.OpExecError:
1950
      self.LogWarning("Device creation failed")
1951
      self.cfg.ReleaseDRBDMinors(self.instance.uuid)
1952
      raise
1953

    
1954
    errs = []
1955
    transfers = []
1956
    # activate, get path, create transfer jobs
1957
    for idx, disk in enumerate(self.instance.disks):
1958
      # FIXME: pass debug option from opcode to backend
1959
      dt = masterd.instance.DiskTransfer("disk/%s" % idx,
1960
                                         constants.IEIO_RAW_DISK,
1961
                                         (disk, self.instance),
1962
                                         constants.IEIO_RAW_DISK,
1963
                                         (disk, self.instance),
1964
                                         None)
1965
      transfers.append(dt)
1966

    
1967
    import_result = \
1968
      masterd.instance.TransferInstanceData(self, feedback_fn,
1969
                                            source_node.uuid,
1970
                                            target_node.uuid,
1971
                                            target_node.secondary_ip,
1972
                                            self.op.compress,
1973
                                            self.instance, transfers)
1974
    if not compat.all(import_result):
1975
      errs.append("Failed to transfer instance data")
1976

    
1977
    if errs:
1978
      self.LogWarning("Some disks failed to copy, aborting")
1979
      try:
1980
        RemoveDisks(self, self.instance, target_node_uuid=target_node.uuid)
1981
      finally:
1982
        self.cfg.ReleaseDRBDMinors(self.instance.uuid)
1983
        raise errors.OpExecError("Errors during disk copy: %s" %
1984
                                 (",".join(errs),))
1985

    
1986
    self.instance.primary_node = target_node.uuid
1987
    self.cfg.Update(self.instance, feedback_fn)
1988

    
1989
    self.LogInfo("Removing the disks on the original node")
1990
    RemoveDisks(self, self.instance, target_node_uuid=source_node.uuid)
1991

    
1992
    # Only start the instance if it's marked as up
1993
    if self.instance.admin_state == constants.ADMINST_UP:
1994
      self.LogInfo("Starting instance %s on node %s",
1995
                   self.instance.name, target_node.name)
1996

    
1997
      disks_ok, _ = AssembleInstanceDisks(self, self.instance,
1998
                                          ignore_secondaries=True)
1999
      if not disks_ok:
2000
        ShutdownInstanceDisks(self, self.instance)
2001
        raise errors.OpExecError("Can't activate the instance's disks")
2002

    
2003
      result = self.rpc.call_instance_start(target_node.uuid,
2004
                                            (self.instance, None, None), False,
2005
                                            self.op.reason)
2006
      msg = result.fail_msg
2007
      if msg:
2008
        ShutdownInstanceDisks(self, self.instance)
2009
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
2010
                                 (self.instance.name, target_node.name, msg))
2011

    
2012

    
2013
class LUInstanceMultiAlloc(NoHooksLU):
2014
  """Allocates multiple instances at the same time.
2015

2016
  """
2017
  REQ_BGL = False
2018

    
2019
  def CheckArguments(self):
2020
    """Check arguments.
2021

2022
    """
2023
    nodes = []
2024
    for inst in self.op.instances:
2025
      if inst.iallocator is not None:
2026
        raise errors.OpPrereqError("iallocator are not allowed to be set on"
2027
                                   " instance objects", errors.ECODE_INVAL)
2028
      nodes.append(bool(inst.pnode))
2029
      if inst.disk_template in constants.DTS_INT_MIRROR:
2030
        nodes.append(bool(inst.snode))
2031

    
2032
    has_nodes = compat.any(nodes)
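    # compat.all(nodes) ^ has_nodes is True exactly when only some of the
    # instance objects specify nodes, e.g. nodes == [True, False]: all() is
    # False while any() is True, so the mixed specification is rejected.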
2033
    if compat.all(nodes) ^ has_nodes:
2034
      raise errors.OpPrereqError("There are instance objects providing"
2035
                                 " pnode/snode while others do not",
2036
                                 errors.ECODE_INVAL)
2037

    
2038
    if not has_nodes and self.op.iallocator is None:
2039
      default_iallocator = self.cfg.GetDefaultIAllocator()
2040
      if default_iallocator:
2041
        self.op.iallocator = default_iallocator
2042
      else:
2043
        raise errors.OpPrereqError("No iallocator or nodes on the instances"
2044
                                   " given and no cluster-wide default"
2045
                                   " iallocator found; please specify either"
2046
                                   " an iallocator or nodes on the instances"
2047
                                   " or set a cluster-wide default iallocator",
2048
                                   errors.ECODE_INVAL)
2049

    
2050
    _CheckOpportunisticLocking(self.op)
2051

    
2052
    dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
2053
    if dups:
2054
      raise errors.OpPrereqError("There are duplicate instance names: %s" %
2055
                                 utils.CommaJoin(dups), errors.ECODE_INVAL)
2056

    
2057
  def ExpandNames(self):
2058
    """Calculate the locks.
2059

2060
    """
2061
    self.share_locks = ShareAll()
2062
    self.needed_locks = {
2063
      # iallocator will select nodes and even if no iallocator is used,
2064
      # collisions with LUInstanceCreate should be avoided
2065
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
2066
      }
2067

    
2068
    if self.op.iallocator:
2069
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
2070
      self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET
2071

    
2072
      if self.op.opportunistic_locking:
2073
        self.opportunistic_locks[locking.LEVEL_NODE] = True
2074
    else:
2075
      nodeslist = []
2076
      for inst in self.op.instances:
2077
        (inst.pnode_uuid, inst.pnode) = \
2078
          ExpandNodeUuidAndName(self.cfg, inst.pnode_uuid, inst.pnode)
2079
        nodeslist.append(inst.pnode_uuid)
2080
        if inst.snode is not None:
2081
          (inst.snode_uuid, inst.snode) = \
2082
            ExpandNodeUuidAndName(self.cfg, inst.snode_uuid, inst.snode)
2083
          nodeslist.append(inst.snode_uuid)
2084

    
2085
      self.needed_locks[locking.LEVEL_NODE] = nodeslist
2086
      # Lock resources of instance's primary and secondary nodes (copy to
2087
      # prevent accidental modification)
2088
      self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)
2089

    
2090
  def DeclareLocks(self, level):
2091
    if level == locking.LEVEL_NODE_RES and \
2092
      self.opportunistic_locks[locking.LEVEL_NODE]:
2093
      # Even when using opportunistic locking, we require the same set of
2094
      # NODE_RES locks as we got NODE locks
2095
      self.needed_locks[locking.LEVEL_NODE_RES] = \
2096
        self.owned_locks(locking.LEVEL_NODE)
2097

    
2098
  def CheckPrereq(self):
2099
    """Check prerequisite.
2100

2101
    """
2102
    if self.op.iallocator:
2103
      cluster = self.cfg.GetClusterInfo()
2104
      default_vg = self.cfg.GetVGName()
2105
      ec_id = self.proc.GetECId()
2106

    
2107
      if self.op.opportunistic_locking:
2108
        # Only consider nodes for which a lock is held
2109
        node_whitelist = self.cfg.GetNodeNames(
2110
                           list(self.owned_locks(locking.LEVEL_NODE)))
2111
      else:
2112
        node_whitelist = None
2113

    
2114
      insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
2115
                                           _ComputeNics(op, cluster, None,
2116
                                                        self.cfg, ec_id),
2117
                                           _ComputeFullBeParams(op, cluster),
2118
                                           node_whitelist)
2119
               for op in self.op.instances]
2120

    
2121
      req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
2122
      ial = iallocator.IAllocator(self.cfg, self.rpc, req)
2123

    
2124
      ial.Run(self.op.iallocator)
2125

    
2126
      if not ial.success:
2127
        raise errors.OpPrereqError("Can't compute nodes using"
2128
                                   " iallocator '%s': %s" %
2129
                                   (self.op.iallocator, ial.info),
2130
                                   errors.ECODE_NORES)
2131

    
2132
      self.ia_result = ial.result
2133

    
2134
    if self.op.dry_run:
2135
      self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
2136
        constants.JOB_IDS_KEY: [],
2137
        })
2138

    
2139
  def _ConstructPartialResult(self):
2140
    """Contructs the partial result.
2141

2142
    """
2143
    if self.op.iallocator:
2144
      (allocatable, failed_insts) = self.ia_result
2145
      allocatable_insts = map(compat.fst, allocatable)
2146
    else:
2147
      allocatable_insts = [op.instance_name for op in self.op.instances]
2148
      failed_insts = []
2149

    
2150
    return {
2151
      constants.ALLOCATABLE_KEY: allocatable_insts,
2152
      constants.FAILED_KEY: failed_insts,
2153
      }
2154

    
2155
  def Exec(self, feedback_fn):
2156
    """Executes the opcode.
2157

2158
    """
2159
    jobs = []
2160
    if self.op.iallocator:
2161
      op2inst = dict((op.instance_name, op) for op in self.op.instances)
2162
      (allocatable, failed) = self.ia_result
2163

    
2164
      for (name, node_names) in allocatable:
2165
        op = op2inst.pop(name)
2166

    
2167
        (op.pnode_uuid, op.pnode) = \
2168
          ExpandNodeUuidAndName(self.cfg, None, node_names[0])
2169
        if len(node_names) > 1:
2170
          (op.snode_uuid, op.snode) = \
2171
            ExpandNodeUuidAndName(self.cfg, None, node_names[1])
2172

    
2173
        jobs.append([op])
2174

    
2175
      missing = set(op2inst.keys()) - set(failed)
2176
      assert not missing, \
2177
        "Iallocator returned an incomplete result: %s" % \
2178
        utils.CommaJoin(missing)
2179
    else:
2180
      jobs.extend([op] for op in self.op.instances)
2181

    
2182
    return ResultWithJobs(jobs, **self._ConstructPartialResult())
2183

    
2184

    
2185
class _InstNicModPrivate:
2186
  """Data structure for network interface modifications.
2187

2188
  Used by L{LUInstanceSetParams}.
2189

2190
  """
2191
  def __init__(self):
2192
    self.params = None
2193
    self.filled = None
2194

    
2195

    
2196
def _PrepareContainerMods(mods, private_fn):
2197
  """Prepares a list of container modifications by adding a private data field.
2198

2199
  @type mods: list of tuples; (operation, index, parameters)
2200
  @param mods: List of modifications
2201
  @type private_fn: callable or None
2202
  @param private_fn: Callable for constructing a private data field for a
2203
    modification
2204
  @rtype: list
2205

2206
  """
2207
  if private_fn is None:
2208
    fn = lambda: None
2209
  else:
2210
    fn = private_fn
2211

    
2212
  return [(op, idx, params, fn()) for (op, idx, params) in mods]
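# Illustrative call (hypothetical values):
#   _PrepareContainerMods([(constants.DDM_ADD, -1, {"link": "br0"})],
#                         _InstNicModPrivate)
# returns [(constants.DDM_ADD, -1, {"link": "br0"}, <_InstNicModPrivate>)],
# i.e. every 3-tuple gains a freshly constructed private data object (or None
# when no private_fn is given).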
2213

    
2214

    
2215
def _CheckNodesPhysicalCPUs(lu, node_uuids, requested, hypervisor_specs):
2216
  """Checks if nodes have enough physical CPUs
2217

2218
  This function checks if all given nodes have the needed number of
2219
  physical CPUs. In case any node has less CPUs or we cannot get the
2220
  information from the node, this function raises an OpPrereqError
2221
  exception.
2222

2223
  @type lu: C{LogicalUnit}
2224
  @param lu: a logical unit from which we get configuration data
2225
  @type node_uuids: C{list}
2226
  @param node_uuids: the list of node UUIDs to check
2227
  @type requested: C{int}
2228
  @param requested: the minimum acceptable number of physical CPUs
2229
  @type hypervisor_specs: list of pairs (string, dict of strings)
2230
  @param hypervisor_specs: list of hypervisor specifications in
2231
      pairs (hypervisor_name, hvparams)
2232
  @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
2233
      or we cannot check the node
2234

2235
  """
2236
  nodeinfo = lu.rpc.call_node_info(node_uuids, None, hypervisor_specs)
2237
  for node_uuid in node_uuids:
2238
    info = nodeinfo[node_uuid]
2239
    node_name = lu.cfg.GetNodeName(node_uuid)
2240
    info.Raise("Cannot get current information from node %s" % node_name,
2241
               prereq=True, ecode=errors.ECODE_ENVIRON)
2242
    (_, _, (hv_info, )) = info.payload
2243
    num_cpus = hv_info.get("cpu_total", None)
2244
    if not isinstance(num_cpus, int):
2245
      raise errors.OpPrereqError("Can't compute the number of physical CPUs"
2246
                                 " on node %s, result was '%s'" %
2247
                                 (node_name, num_cpus), errors.ECODE_ENVIRON)
2248
    if requested > num_cpus:
2249
      raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
2250
                                 "required" % (node_name, num_cpus, requested),
2251
                                 errors.ECODE_NORES)
2252

    
2253

    
2254
def GetItemFromContainer(identifier, kind, container):
2255
  """Return the item refered by the identifier.
2256

2257
  @type identifier: string
2258
  @param identifier: Item index or name or UUID
2259
  @type kind: string
2260
  @param kind: One-word item description
2261
  @type container: list
2262
  @param container: Container to get the item from
2263

2264
  """
2265
  # Index
2266
  try:
2267
    idx = int(identifier)
2268
    if idx == -1:
2269
      # Append
2270
      absidx = len(container) - 1
2271
    elif idx < 0:
2272
      raise IndexError("Not accepting negative indices other than -1")
2273
    elif idx > len(container):
2274
      raise IndexError("Got %s index %s, but there are only %s" %
2275
                       (kind, idx, len(container)))
2276
    else:
2277
      absidx = idx
2278
    return (absidx, container[idx])
2279
  except ValueError:
2280
    pass
2281

    
2282
  for idx, item in enumerate(container):
2283
    if item.uuid == identifier or item.name == identifier:
2284
      return (idx, item)
2285

    
2286
  raise errors.OpPrereqError("Cannot find %s with identifier %s" %
2287
                             (kind, identifier), errors.ECODE_NOENT)
2288

    
2289

    
2290
def _ApplyContainerMods(kind, container, chgdesc, mods,
2291
                        create_fn, modify_fn, remove_fn,
2292
                        post_add_fn=None):
2293
  """Applies descriptions in C{mods} to C{container}.
2294

2295
  @type kind: string
2296
  @param kind: One-word item description
2297
  @type container: list
2298
  @param container: Container to modify
2299
  @type chgdesc: None or list
2300
  @param chgdesc: List of applied changes
2301
  @type mods: list
2302
  @param mods: Modifications as returned by L{_PrepareContainerMods}
2303
  @type create_fn: callable
2304
  @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
2305
    receives absolute item index, parameters and private data object as added
2306
    by L{_PrepareContainerMods}, returns tuple containing new item and changes
2307
    as list
2308
  @type modify_fn: callable
2309
  @param modify_fn: Callback for modifying an existing item
2310
    (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
2311
    and private data object as added by L{_PrepareContainerMods}, returns
2312
    changes as list
2313
  @type remove_fn: callable
2314
  @param remove_fn: Callback on removing item; receives absolute item index,
2315
    item and private data object as added by L{_PrepareContainerMods}
2316
  @type post_add_fn: callable
2317
  @param post_add_fn: Callable for post-processing a newly created item after
2318
    it has been put into the container. It receives the index of the new item
2319
    and the new item as parameters.
2320

2321
  """
2322
  for (op, identifier, params, private) in mods:
2323
    changes = None
2324

    
2325
    if op == constants.DDM_ADD:
2326
      # Calculate where item will be added
2327
      # When adding an item, identifier can only be an index
2328
      try:
2329
        idx = int(identifier)
2330
      except ValueError:
2331
        raise errors.OpPrereqError("Only possitive integer or -1 is accepted as"
2332
                                   " identifier for %s" % constants.DDM_ADD,
2333
                                   errors.ECODE_INVAL)
2334
      if idx == -1:
2335
        addidx = len(container)
2336
      else:
2337
        if idx < 0:
2338
          raise IndexError("Not accepting negative indices other than -1")
2339
        elif idx > len(container):
2340
          raise IndexError("Got %s index %s, but there are only %s" %
2341
                           (kind, idx, len(container)))
2342
        addidx = idx
2343

    
2344
      if create_fn is None:
2345
        item = params
2346
      else:
2347
        (item, changes) = create_fn(addidx, params, private)
2348

    
2349
      if idx == -1:
2350
        container.append(item)
2351
      else:
2352
        assert idx >= 0
2353
        assert idx <= len(container)
2354
        # list.insert does so before the specified index
2355
        container.insert(idx, item)
2356

    
2357
      if post_add_fn is not None:
2358
        post_add_fn(addidx, item)
2359

    
2360
    else:
2361
      # Retrieve existing item
2362
      (absidx, item) = GetItemFromContainer(identifier, kind, container)
2363

    
2364
      if op == constants.DDM_REMOVE:
2365
        assert not params
2366

    
2367
        changes = [("%s/%s" % (kind, absidx), "remove")]
2368

    
2369
        if remove_fn is not None:
2370
          msg = remove_fn(absidx, item, private)
2371
          if msg:
2372
            changes.append(("%s/%s" % (kind, absidx), msg))
2373

    
2374
        assert container[absidx] == item
2375
        del container[absidx]
2376
      elif op == constants.DDM_MODIFY:
2377
        if modify_fn is not None:
2378
          changes = modify_fn(absidx, item, params, private)
2379
      else:
2380
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2381

    
2382
    assert _TApplyContModsCbChanges(changes)
2383

    
2384
    if not (chgdesc is None or changes is None):
2385
      chgdesc.extend(changes)
2386

    
2387

    
2388
def _UpdateIvNames(base_index, disks):
2389
  """Updates the C{iv_name} attribute of disks.
2390

2391
  @type disks: list of L{objects.Disk}
2392

2393
  """
2394
  for (idx, disk) in enumerate(disks):
2395
    disk.iv_name = "disk/%s" % (base_index + idx, )
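  # For example (hypothetical call): _UpdateIvNames(2, [d0, d1]) relabels the
  # two disks as "disk/2" and "disk/3", keeping iv_name values consecutive
  # after an earlier addition or removal in the container.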
2396

    
2397

    
2398
class LUInstanceSetParams(LogicalUnit):
2399
  """Modifies an instances's parameters.
2400

2401
  """
2402
  HPATH = "instance-modify"
2403
  HTYPE = constants.HTYPE_INSTANCE
2404
  REQ_BGL = False
2405

    
2406
  @staticmethod
2407
  def _UpgradeDiskNicMods(kind, mods, verify_fn):
2408
    assert ht.TList(mods)
2409
    assert not mods or len(mods[0]) in (2, 3)
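    # Legacy 2-tuple mods are upgraded to the 3-tuple form below; for instance
    # (hypothetical values): [("add", {"size": 1024})] becomes
    # [(constants.DDM_ADD, -1, {"size": 1024})], while an index entry such as
    # ("0", {"mode": "rw"}) becomes (constants.DDM_MODIFY, "0", {"mode": "rw"}).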
2410

    
2411
    if mods and len(mods[0]) == 2:
2412
      result = []
2413

    
2414
      addremove = 0
2415
      for op, params in mods:
2416
        if op in (constants.DDM_ADD, constants.DDM_REMOVE):
2417
          result.append((op, -1, params))
2418
          addremove += 1
2419

    
2420
          if addremove > 1:
2421
            raise errors.OpPrereqError("Only one %s add or remove operation is"
2422
                                       " supported at a time" % kind,
2423
                                       errors.ECODE_INVAL)
2424
        else:
2425
          result.append((constants.DDM_MODIFY, op, params))
2426

    
2427
      assert verify_fn(result)
2428
    else:
2429
      result = mods
2430

    
2431
    return result
2432

    
2433
  @staticmethod
2434
  def _CheckMods(kind, mods, key_types, item_fn):
2435
    """Ensures requested disk/NIC modifications are valid.
2436

2437
    """
2438
    for (op, _, params) in mods:
2439
      assert ht.TDict(params)
2440

    
2441
      # If 'key_types' is an empty dict, we assume we have an
2442
      # 'ext' template and thus do not ForceDictType
2443
      if key_types:
2444
        utils.ForceDictType(params, key_types)
2445

    
2446
      if op == constants.DDM_REMOVE:
2447
        if params:
2448
          raise errors.OpPrereqError("No settings should be passed when"
2449
                                     " removing a %s" % kind,
2450
                                     errors.ECODE_INVAL)
2451
      elif op in (constants.DDM_ADD, constants.DDM_MODIFY):
2452
        item_fn(op, params)
2453
      else:
2454
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2455

    
2456
  def _VerifyDiskModification(self, op, params, excl_stor):
2457
    """Verifies a disk modification.
2458

2459
    """
2460
    if op == constants.DDM_ADD:
2461
      mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
2462
      if mode not in constants.DISK_ACCESS_SET:
2463
        raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
2464
                                   errors.ECODE_INVAL)
2465

    
2466
      size = params.get(constants.IDISK_SIZE, None)
2467
      if size is None:
2468
        raise errors.OpPrereqError("Required disk parameter '%s' missing" %
2469
                                   constants.IDISK_SIZE, errors.ECODE_INVAL)
2470
      size = int(size)
2471

    
2472
      params[constants.IDISK_SIZE] = size
2473
      name = params.get(constants.IDISK_NAME, None)
2474
      if name is not None and name.lower() == constants.VALUE_NONE:
2475
        params[constants.IDISK_NAME] = None
2476

    
2477
      CheckSpindlesExclusiveStorage(params, excl_stor, True)
2478

    
2479
    elif op == constants.DDM_MODIFY:
2480
      if constants.IDISK_SIZE in params:
2481
        raise errors.OpPrereqError("Disk size change not possible, use"
2482
                                   " grow-disk", errors.ECODE_INVAL)
2483

    
2484
      # Disk modification supports changing only the disk name and mode.
2485
      # Changing arbitrary parameters is allowed only for the ext disk template.
2486
      if self.instance.disk_template != constants.DT_EXT:
2487
        utils.ForceDictType(params, constants.MODIFIABLE_IDISK_PARAMS_TYPES)
2488

    
2489
      name = params.get(constants.IDISK_NAME, None)
2490
      if name is not None and name.lower() == constants.VALUE_NONE:
2491
        params[constants.IDISK_NAME] = None
2492

    
2493
  @staticmethod
2494
  def _VerifyNicModification(op, params):
2495
    """Verifies a network interface modification.
2496

2497
    """
2498
    if op in (constants.DDM_ADD, constants.DDM_MODIFY):
2499
      ip = params.get(constants.INIC_IP, None)
2500
      name = params.get(constants.INIC_NAME, None)
2501
      req_net = params.get(constants.INIC_NETWORK, None)
2502
      link = params.get(constants.NIC_LINK, None)
2503
      mode = params.get(constants.NIC_MODE, None)
2504
      if name is not None and name.lower() == constants.VALUE_NONE:
2505
        params[constants.INIC_NAME] = None
2506
      if req_net is not None:
2507
        if req_net.lower() == constants.VALUE_NONE:
2508
          params[constants.INIC_NETWORK] = None
2509
          req_net = None
2510
        elif link is not None or mode is not None:
2511
          raise errors.OpPrereqError("If network is given"
2512
                                     " mode or link should not",
2513
                                     errors.ECODE_INVAL)
2514

    
2515
      if op == constants.DDM_ADD:
2516
        macaddr = params.get(constants.INIC_MAC, None)
2517
        if macaddr is None:
2518
          params[constants.INIC_MAC] = constants.VALUE_AUTO
2519

    
2520
      if ip is not None:
2521
        if ip.lower() == constants.VALUE_NONE:
2522
          params[constants.INIC_IP] = None
2523
        else:
2524
          if ip.lower() == constants.NIC_IP_POOL:
2525
            if op == constants.DDM_ADD and req_net is None:
2526
              raise errors.OpPrereqError("If ip=pool, parameter network"
2527
                                         " cannot be none",
2528
                                         errors.ECODE_INVAL)
2529
          else:
2530
            if not netutils.IPAddress.IsValid(ip):
2531
              raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
2532
                                         errors.ECODE_INVAL)
2533

    
2534
      if constants.INIC_MAC in params:
2535
        macaddr = params[constants.INIC_MAC]
2536
        if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2537
          macaddr = utils.NormalizeAndValidateMac(macaddr)
2538

    
2539
        if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
2540
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
2541
                                     " modifying an existing NIC",
2542
                                     errors.ECODE_INVAL)
2543

    
2544
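  # Illustrative sketch only (values are made up): NIC modifications use the
  # same (op, identifier, params) shape, e.g.
  #   (constants.DDM_ADD, -1, {constants.INIC_MAC: constants.VALUE_AUTO,
  #                            constants.INIC_IP: constants.NIC_IP_POOL,
  #                            constants.INIC_NETWORK: "net1"})
  # as checked by _VerifyNicModification above.
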
  def CheckArguments(self):
    if not (self.op.nics or self.op.disks or self.op.disk_template or
            self.op.hvparams or self.op.beparams or self.op.os_name or
            self.op.osparams or self.op.offline is not None or
            self.op.runtime_mem or self.op.pnode or self.op.osparams_private or
            self.op.instance_communication is not None):
      raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)

    if self.op.hvparams:
      CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
                           "hypervisor", "instance", "cluster")

    self.op.disks = self._UpgradeDiskNicMods(
      "disk", self.op.disks, ht.TSetParamsMods(ht.TIDiskParams))
    self.op.nics = self._UpgradeDiskNicMods(
      "NIC", self.op.nics, ht.TSetParamsMods(ht.TINicParams))

    if self.op.disks and self.op.disk_template is not None:
      raise errors.OpPrereqError("Disk template conversion and other disk"
                                 " changes not supported at the same time",
                                 errors.ECODE_INVAL)

    if (self.op.disk_template and
        self.op.disk_template in constants.DTS_INT_MIRROR and
        self.op.remote_node is None):
      raise errors.OpPrereqError("Changing the disk template to a mirrored"
                                 " one requires specifying a secondary node",
                                 errors.ECODE_INVAL)

    # Check NIC modifications
    self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
                    self._VerifyNicModification)

    if self.op.pnode:
      (self.op.pnode_uuid, self.op.pnode) = \
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODEGROUP] = []
    # Can't even acquire node locks in shared mode as upcoming changes in
    # Ganeti 2.6 will start to modify the node object on disk conversion
    self.needed_locks[locking.LEVEL_NODE] = []
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
    # Lock the node group in order to look up the ipolicy
    self.share_locks[locking.LEVEL_NODEGROUP] = 1

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
      # Acquire locks for the instance's nodegroups optimistically. Needs
      # to be verified in CheckPrereq
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
        self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
    elif level == locking.LEVEL_NODE:
      self._LockInstancesNodes()
      if self.op.disk_template and self.op.remote_node:
        (self.op.remote_node_uuid, self.op.remote_node) = \
          ExpandNodeUuidAndName(self.cfg, self.op.remote_node_uuid,
                                self.op.remote_node)
        self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node_uuid)
    elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
2612
    """Build hooks env.
2613

2614
    This runs on the master, primary and secondaries.
2615

2616
    """
2617
    args = {}
2618
    if constants.BE_MINMEM in self.be_new:
2619
      args["minmem"] = self.be_new[constants.BE_MINMEM]
2620
    if constants.BE_MAXMEM in self.be_new:
2621
      args["maxmem"] = self.be_new[constants.BE_MAXMEM]
2622
    if constants.BE_VCPUS in self.be_new:
2623
      args["vcpus"] = self.be_new[constants.BE_VCPUS]
2624
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
2625
    # information at all.
2626

    
2627
    if self._new_nics is not None:
2628
      nics = []
2629

    
2630
      for nic in self._new_nics:
2631
        n = copy.deepcopy(nic)
2632
        nicparams = self.cluster.SimpleFillNIC(n.nicparams)
2633
        n.nicparams = nicparams
2634
        nics.append(NICToTuple(self, n))
2635

    
2636
      args["nics"] = nics
2637

    
2638
    env = BuildInstanceHookEnvByObject(self, self.instance, override=args)
2639
    if self.op.disk_template:
2640
      env["NEW_DISK_TEMPLATE"] = self.op.disk_template
2641
    if self.op.runtime_mem:
2642
      env["RUNTIME_MEMORY"] = self.op.runtime_mem
2643

    
2644
    return env
2645

    
2646
  def BuildHooksNodes(self):
2647
    """Build hooks nodes.
2648

2649
    """
2650
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
2651
    return (nl, nl)
2652

    
2653
  def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
2654
                              old_params, cluster, pnode_uuid):
2655

    
2656
    update_params_dict = dict([(key, params[key])
2657
                               for key in constants.NICS_PARAMETERS
2658
                               if key in params])
2659

    
2660
    req_link = update_params_dict.get(constants.NIC_LINK, None)
2661
    req_mode = update_params_dict.get(constants.NIC_MODE, None)
2662

    
2663
    new_net_uuid = None
2664
    new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid)
2665
    if new_net_uuid_or_name:
2666
      new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name)
2667
      new_net_obj = self.cfg.GetNetwork(new_net_uuid)
2668

    
2669
    if old_net_uuid:
2670
      old_net_obj = self.cfg.GetNetwork(old_net_uuid)
2671

    
2672
    if new_net_uuid:
2673
      netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode_uuid)
2674
      if not netparams:
2675
        raise errors.OpPrereqError("No netparams found for the network"
2676
                                   " %s, probably not connected" %
2677
                                   new_net_obj.name, errors.ECODE_INVAL)
2678
      new_params = dict(netparams)
2679
    else:
2680
      new_params = GetUpdatedParams(old_params, update_params_dict)
2681

    
2682
    utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)
2683

    
2684
    new_filled_params = cluster.SimpleFillNIC(new_params)
2685
    objects.NIC.CheckParameterSyntax(new_filled_params)
2686

    
2687
    new_mode = new_filled_params[constants.NIC_MODE]
2688
    if new_mode == constants.NIC_MODE_BRIDGED:
2689
      bridge = new_filled_params[constants.NIC_LINK]
2690
      msg = self.rpc.call_bridges_exist(pnode_uuid, [bridge]).fail_msg
2691
      if msg:
2692
        msg = "Error checking bridges on node '%s': %s" % \
2693
                (self.cfg.GetNodeName(pnode_uuid), msg)
2694
        if self.op.force:
2695
          self.warn.append(msg)
2696
        else:
2697
          raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
2698

    
2699
    elif new_mode == constants.NIC_MODE_ROUTED:
2700
      ip = params.get(constants.INIC_IP, old_ip)
2701
      if ip is None:
2702
        raise errors.OpPrereqError("Cannot set the NIC IP address to None"
2703
                                   " on a routed NIC", errors.ECODE_INVAL)
2704

    
2705
    elif new_mode == constants.NIC_MODE_OVS:
2706
      # TODO: check OVS link
2707
      self.LogInfo("OVS links are currently not checked for correctness")
2708

    
2709
    if constants.INIC_MAC in params:
2710
      mac = params[constants.INIC_MAC]
2711
      if mac is None:
2712
        raise errors.OpPrereqError("Cannot unset the NIC MAC address",
2713
                                   errors.ECODE_INVAL)
2714
      elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2715
        # otherwise generate the MAC address
2716
        params[constants.INIC_MAC] = \
2717
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
2718
      else:
2719
        # or validate/reserve the current one
2720
        try:
2721
          self.cfg.ReserveMAC(mac, self.proc.GetECId())
2722
        except errors.ReservationError:
2723
          raise errors.OpPrereqError("MAC address '%s' already in use"
2724
                                     " in cluster" % mac,
2725
                                     errors.ECODE_NOTUNIQUE)
2726
    elif new_net_uuid != old_net_uuid:
2727

    
2728
      def get_net_prefix(net_uuid):
2729
        mac_prefix = None
2730
        if net_uuid:
2731
          nobj = self.cfg.GetNetwork(net_uuid)
2732
          mac_prefix = nobj.mac_prefix
2733

    
2734
        return mac_prefix
2735

    
2736
      new_prefix = get_net_prefix(new_net_uuid)
2737
      old_prefix = get_net_prefix(old_net_uuid)
2738
      if old_prefix != new_prefix:
2739
        params[constants.INIC_MAC] = \
2740
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
2741

    
2742
    # if there is a change in (ip, network) tuple
2743
    new_ip = params.get(constants.INIC_IP, old_ip)
2744
    if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
2745
      if new_ip:
2746
        # if IP is pool then require a network and generate one IP
2747
        if new_ip.lower() == constants.NIC_IP_POOL:
2748
          if new_net_uuid:
2749
            try:
2750
              new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
2751
            except errors.ReservationError:
2752
              raise errors.OpPrereqError("Unable to get a free IP"
2753
                                         " from the address pool",
2754
                                         errors.ECODE_STATE)
2755
            self.LogInfo("Chose IP %s from network %s",
2756
                         new_ip,
2757
                         new_net_obj.name)
2758
            params[constants.INIC_IP] = new_ip
2759
          else:
2760
            raise errors.OpPrereqError("ip=pool, but no network found",
2761
                                       errors.ECODE_INVAL)
2762
        # Reserve the new IP in the new network, if any
2763
        elif new_net_uuid:
2764
          try:
2765
            self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId(),
2766
                               check=self.op.conflicts_check)
2767
            self.LogInfo("Reserving IP %s in network %s",
2768
                         new_ip, new_net_obj.name)
2769
          except errors.ReservationError:
2770
            raise errors.OpPrereqError("IP %s not available in network %s" %
2771
                                       (new_ip, new_net_obj.name),
2772
                                       errors.ECODE_NOTUNIQUE)
2773
        # new network is None so check if new IP is a conflicting IP
2774
        elif self.op.conflicts_check:
2775
          _CheckForConflictingIp(self, new_ip, pnode_uuid)
2776

    
2777
      # release old IP if old network is not None
2778
      if old_ip and old_net_uuid:
2779
        try:
2780
          self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId())
2781
        except errors.AddressPoolError:
2782
          logging.warning("Release IP %s not contained in network %s",
2783
                          old_ip, old_net_obj.name)
2784

    
2785
    # there are no changes in (ip, network) tuple and old network is not None
2786
    elif (old_net_uuid is not None and
2787
          (req_link is not None or req_mode is not None)):
2788
      raise errors.OpPrereqError("Not allowed to change link or mode of"
2789
                                 " a NIC that is connected to a network",
2790
                                 errors.ECODE_INVAL)
2791

    
2792
    private.params = new_params
2793
    private.filled = new_filled_params
2794

    
2795
  def _PreCheckDiskTemplate(self, pnode_info):
2796
    """CheckPrereq checks related to a new disk template."""
2797
    # Arguments are passed to avoid configuration lookups
2798
    pnode_uuid = self.instance.primary_node
2799
    if self.instance.disk_template == self.op.disk_template:
2800
      raise errors.OpPrereqError("Instance already has disk template %s" %
2801
                                 self.instance.disk_template,
2802
                                 errors.ECODE_INVAL)
2803

    
2804
    if not self.cluster.IsDiskTemplateEnabled(self.op.disk_template):
2805
      raise errors.OpPrereqError("Disk template '%s' is not enabled for this"
2806
                                 " cluster." % self.op.disk_template)
2807

    
2808
    if (self.instance.disk_template,
2809
        self.op.disk_template) not in self._DISK_CONVERSIONS:
2810
      raise errors.OpPrereqError("Unsupported disk template conversion from"
2811
                                 " %s to %s" % (self.instance.disk_template,
2812
                                                self.op.disk_template),
2813
                                 errors.ECODE_INVAL)
2814
    CheckInstanceState(self, self.instance, INSTANCE_DOWN,
2815
                       msg="cannot change disk template")
2816
    if self.op.disk_template in constants.DTS_INT_MIRROR:
2817
      if self.op.remote_node_uuid == pnode_uuid:
2818
        raise errors.OpPrereqError("Given new secondary node %s is the same"
2819
                                   " as the primary node of the instance" %
2820
                                   self.op.remote_node, errors.ECODE_STATE)
2821
      CheckNodeOnline(self, self.op.remote_node_uuid)
2822
      CheckNodeNotDrained(self, self.op.remote_node_uuid)
2823
      # FIXME: here we assume that the old instance type is DT_PLAIN
2824
      assert self.instance.disk_template == constants.DT_PLAIN
2825
      disks = [{constants.IDISK_SIZE: d.size,
2826
                constants.IDISK_VG: d.logical_id[0]}
2827
               for d in self.instance.disks]
2828
      required = ComputeDiskSizePerVG(self.op.disk_template, disks)
2829
      CheckNodesFreeDiskPerVG(self, [self.op.remote_node_uuid], required)
2830

    
2831
      snode_info = self.cfg.GetNodeInfo(self.op.remote_node_uuid)
2832
      snode_group = self.cfg.GetNodeGroup(snode_info.group)
2833
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
2834
                                                              snode_group)
2835
      CheckTargetNodeIPolicy(self, ipolicy, self.instance, snode_info, self.cfg,
2836
                             ignore=self.op.ignore_ipolicy)
2837
      if pnode_info.group != snode_info.group:
2838
        self.LogWarning("The primary and secondary nodes are in two"
2839
                        " different node groups; the disk parameters"
2840
                        " from the first disk's node group will be"
2841
                        " used")
2842

    
2843
    if self.op.disk_template not in constants.DTS_EXCL_STORAGE:
2844
      # Make sure none of the nodes require exclusive storage
2845
      nodes = [pnode_info]
2846
      if self.op.disk_template in constants.DTS_INT_MIRROR:
2847
        assert snode_info
2848
        nodes.append(snode_info)
2849
      has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
2850
      if compat.any(map(has_es, nodes)):
2851
        errmsg = ("Cannot convert disk template from %s to %s when exclusive"
2852
                  " storage is enabled" % (self.instance.disk_template,
2853
                                           self.op.disk_template))
2854
        raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)
2855

    
2856
  def _PreCheckDisks(self, ispec):
2857
    """CheckPrereq checks related to disk changes.
2858

2859
    @type ispec: dict
2860
    @param ispec: instance specs to be updated with the new disks
2861

2862
    """
2863
    self.diskparams = self.cfg.GetInstanceDiskParams(self.instance)
2864

    
2865
    excl_stor = compat.any(
2866
      rpc.GetExclusiveStorageForNodes(self.cfg,
2867
                                      self.instance.all_nodes).values()
2868
      )
2869

    
2870
    # Check disk modifications. This is done here and not in CheckArguments
2871
    # (as with NICs), because we need to know the instance's disk template
2872
    ver_fn = lambda op, par: self._VerifyDiskModification(op, par, excl_stor)
2873
    if self.instance.disk_template == constants.DT_EXT:
2874
      self._CheckMods("disk", self.op.disks, {}, ver_fn)
2875
    else:
2876
      self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
2877
                      ver_fn)
2878

    
2879
    self.diskmod = _PrepareContainerMods(self.op.disks, None)
2880

    
2881
    # Check the validity of the `provider' parameter
2882
    if self.instance.disk_template in constants.DT_EXT:
2883
      for mod in self.diskmod:
2884
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
2885
        if mod[0] == constants.DDM_ADD:
2886
          if ext_provider is None:
2887
            raise errors.OpPrereqError("Instance template is '%s' and parameter"
2888
                                       " '%s' missing, during disk add" %
2889
                                       (constants.DT_EXT,
2890
                                        constants.IDISK_PROVIDER),
2891
                                       errors.ECODE_NOENT)
2892
        elif mod[0] == constants.DDM_MODIFY:
2893
          if ext_provider:
2894
            raise errors.OpPrereqError("Parameter '%s' is invalid during disk"
2895
                                       " modification" %
2896
                                       constants.IDISK_PROVIDER,
2897
                                       errors.ECODE_INVAL)
2898
    else:
2899
      for mod in self.diskmod:
2900
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
2901
        if ext_provider is not None:
2902
          raise errors.OpPrereqError("Parameter '%s' is only valid for"
2903
                                     " instances of type '%s'" %
2904
                                     (constants.IDISK_PROVIDER,
2905
                                      constants.DT_EXT),
2906
                                     errors.ECODE_INVAL)
2907

    
2908
    if not self.op.wait_for_sync and self.instance.disks_active:
2909
      for mod in self.diskmod:
2910
        if mod[0] == constants.DDM_ADD:
2911
          raise errors.OpPrereqError("Can't add a disk to an instance with"
2912
                                     " activated disks and"
2913
                                     " --no-wait-for-sync given.",
2914
                                     errors.ECODE_INVAL)
2915

    
2916
    if self.op.disks and self.instance.disk_template == constants.DT_DISKLESS:
2917
      raise errors.OpPrereqError("Disk operations not supported for"
2918
                                 " diskless instances", errors.ECODE_INVAL)
2919

    
2920
    def _PrepareDiskMod(_, disk, params, __):
2921
      disk.name = params.get(constants.IDISK_NAME, None)
2922

    
2923
    # Verify disk changes (operating on a copy)
2924
    disks = copy.deepcopy(self.instance.disks)
2925
    _ApplyContainerMods("disk", disks, None, self.diskmod, None,
2926
                        _PrepareDiskMod, None)
2927
    utils.ValidateDeviceNames("disk", disks)
2928
    if len(disks) > constants.MAX_DISKS:
2929
      raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
2930
                                 " more" % constants.MAX_DISKS,
2931
                                 errors.ECODE_STATE)
2932
    disk_sizes = [disk.size for disk in self.instance.disks]
    disk_sizes.extend(params["size"] for (op, idx, params, private) in
                      self.diskmod if op == constants.DDM_ADD)
    ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
    ispec[constants.ISPEC_DISK_SIZE] = disk_sizes

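    # At this point ispec describes the disk part of the proposed instance
    # spec, e.g. (illustrative values only):
    #   {constants.ISPEC_DISK_COUNT: 2,
    #    constants.ISPEC_DISK_SIZE: [1024, 2048]}
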
    if self.op.offline is not None and self.op.offline:
2939
      CheckInstanceState(self, self.instance, CAN_CHANGE_INSTANCE_OFFLINE,
2940
                         msg="can't change to offline")
2941

    
2942
  @staticmethod
2943
  def _InstanceCommunicationDDM(cfg, instance_communication, instance):
2944
    """Create a NIC mod that adds or removes the instance
2945
    communication NIC to a running instance.
2946

2947
    The NICS are dynamically created using the Dynamic Device
2948
    Modification (DDM).  This function produces a NIC modification
2949
    (mod) that inserts an additional NIC meant for instance
2950
    communication in or removes an existing instance communication NIC
2951
    from a running instance, using DDM.
2952

2953
    @type cfg: L{config.ConfigWriter}
2954
    @param cfg: cluster configuration
2955

2956
    @type instance_communication: boolean
2957
    @param instance_communication: whether instance communication is
2958
                                   enabled or disabled
2959

2960
    @type instance: L{objects.Instance}
2961
    @param instance: instance to which the NIC mod will be applied to
2962

2963
    @rtype: (L{constants.DDM_ADD}, -1, parameters) or
2964
            (L{constants.DDM_REMOVE}, -1, parameters) or
2965
            L{None}
2966
    @return: DDM mod containing an action to add or remove the NIC, or
2967
             None if nothing needs to be done
2968

2969
    """
2970
    nic_name = _ComputeInstanceCommunicationNIC(instance.name)
2971

    
2972
    instance_communication_nic = None
2973

    
2974
    for nic in instance.nics:
2975
      if nic.name == nic_name:
2976
        instance_communication_nic = nic
2977
        break
2978

    
2979
    if instance_communication and not instance_communication_nic:
2980
      action = constants.DDM_ADD
2981
      params = {constants.INIC_NAME: nic_name,
2982
                constants.INIC_MAC: constants.VALUE_GENERATE,
2983
                constants.INIC_IP: constants.NIC_IP_POOL,
2984
                constants.INIC_NETWORK:
2985
                  cfg.GetInstanceCommunicationNetwork()}
2986
    elif not instance_communication and instance_communication_nic:
2987
      action = constants.DDM_REMOVE
2988
      params = None
2989
    else:
2990
      action = None
2991
      params = None
2992

    
2993
    if action is not None:
      return (action, -1, params)
    else:
      return None

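  # Illustrative summary (values are made up): enabling instance
  # communication on an instance that lacks the special NIC yields
  # (constants.DDM_ADD, -1, {...}), disabling it on an instance that has one
  # yields (constants.DDM_REMOVE, -1, None), and otherwise None is returned.
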
  def CheckPrereq(self):
2999
    """Check prerequisites.
3000

3001
    This only checks the instance list against the existing names.
3002

3003
    """
3004
    assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
3005
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
3006
    self.cluster = self.cfg.GetClusterInfo()
3007
    cluster_hvparams = self.cluster.hvparams[self.instance.hypervisor]
3008

    
3009
    assert self.instance is not None, \
3010
      "Cannot retrieve locked instance %s" % self.op.instance_name
3011

    
3012
    pnode_uuid = self.instance.primary_node
3013

    
3014
    self.warn = []
3015

    
3016
    if (self.op.pnode_uuid is not None and self.op.pnode_uuid != pnode_uuid and
3017
        not self.op.force):
3018
      # verify that the instance is not up
3019
      instance_info = self.rpc.call_instance_info(
3020
          pnode_uuid, self.instance.name, self.instance.hypervisor,
3021
          cluster_hvparams)
3022
      if instance_info.fail_msg:
3023
        self.warn.append("Can't get instance runtime information: %s" %
3024
                         instance_info.fail_msg)
3025
      elif instance_info.payload:
3026
        raise errors.OpPrereqError("Instance is still running on %s" %
3027
                                   self.cfg.GetNodeName(pnode_uuid),
3028
                                   errors.ECODE_STATE)
3029

    
3030
    assert pnode_uuid in self.owned_locks(locking.LEVEL_NODE)
3031
    node_uuids = list(self.instance.all_nodes)
3032
    pnode_info = self.cfg.GetNodeInfo(pnode_uuid)
3033

    
3034
    #_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
3035
    assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
3036
    group_info = self.cfg.GetNodeGroup(pnode_info.group)
3037

    
3038
    # dictionary with instance information after the modification
3039
    ispec = {}
3040

    
3041
    if self.op.hotplug or self.op.hotplug_if_possible:
3042
      result = self.rpc.call_hotplug_supported(self.instance.primary_node,
3043
                                               self.instance)
3044
      if result.fail_msg:
3045
        if self.op.hotplug:
3046
          result.Raise("Hotplug is not possible: %s" % result.fail_msg,
3047
                       prereq=True)
3048
        else:
3049
          self.LogWarning(result.fail_msg)
3050
          self.op.hotplug = False
3051
          self.LogInfo("Modification will take place without hotplugging.")
3052
      else:
3053
        self.op.hotplug = True
3054

    
3055
    # Prepare NIC modifications
3056
    # add or remove NIC for instance communication
3057
    if self.op.instance_communication is not None:
3058
      mod = self._InstanceCommunicationDDM(self.cfg,
3059
                                           self.op.instance_communication,
3060
                                           self.instance)
3061
      if mod is not None:
3062
        self.op.nics.append(mod)
3063

    
3064
    self.nicmod = _PrepareContainerMods(self.op.nics, _InstNicModPrivate)
3065

    
3066
    # OS change
3067
    if self.op.os_name and not self.op.force:
3068
      CheckNodeHasOS(self, self.instance.primary_node, self.op.os_name,
3069
                     self.op.force_variant)
3070
      instance_os = self.op.os_name
3071
    else:
3072
      instance_os = self.instance.os
3073

    
3074
    assert not (self.op.disk_template and self.op.disks), \
3075
      "Can't modify disk template and apply disk changes at the same time"
3076

    
3077
    if self.op.disk_template:
3078
      self._PreCheckDiskTemplate(pnode_info)
3079

    
3080
    self._PreCheckDisks(ispec)
3081

    
3082
    # hvparams processing
3083
    if self.op.hvparams:
3084
      hv_type = self.instance.hypervisor
3085
      i_hvdict = GetUpdatedParams(self.instance.hvparams, self.op.hvparams)
3086
      utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
3087
      hv_new = self.cluster.SimpleFillHV(hv_type, self.instance.os, i_hvdict)
3088

    
3089
      # local check
3090
      hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
3091
      CheckHVParams(self, node_uuids, self.instance.hypervisor, hv_new)
3092
      self.hv_proposed = self.hv_new = hv_new # the new actual values
3093
      self.hv_inst = i_hvdict # the new dict (without defaults)
3094
    else:
3095
      self.hv_proposed = self.cluster.SimpleFillHV(self.instance.hypervisor,
3096
                                                   self.instance.os,
3097
                                                   self.instance.hvparams)
3098
      self.hv_new = self.hv_inst = {}
3099

    
3100
    # beparams processing
3101
    if self.op.beparams:
3102
      i_bedict = GetUpdatedParams(self.instance.beparams, self.op.beparams,
3103
                                  use_none=True)
3104
      objects.UpgradeBeParams(i_bedict)
3105
      utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
3106
      be_new = self.cluster.SimpleFillBE(i_bedict)
3107
      self.be_proposed = self.be_new = be_new # the new actual values
3108
      self.be_inst = i_bedict # the new dict (without defaults)
3109
    else:
3110
      self.be_new = self.be_inst = {}
3111
      self.be_proposed = self.cluster.SimpleFillBE(self.instance.beparams)
3112
    be_old = self.cluster.FillBE(self.instance)
3113

    
3114
    # CPU param validation -- checking every time a parameter is
3115
    # changed to cover all cases where either CPU mask or vcpus have
3116
    # changed
3117
    if (constants.BE_VCPUS in self.be_proposed and
3118
        constants.HV_CPU_MASK in self.hv_proposed):
3119
      cpu_list = \
3120
        utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
3121
      # Verify mask is consistent with number of vCPUs. Can skip this
3122
      # test if only 1 entry in the CPU mask, which means same mask
3123
      # is applied to all vCPUs.
3124
      if (len(cpu_list) > 1 and
3125
          len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
3126
        raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
3127
                                   " CPU mask [%s]" %
3128
                                   (self.be_proposed[constants.BE_VCPUS],
3129
                                    self.hv_proposed[constants.HV_CPU_MASK]),
3130
                                   errors.ECODE_INVAL)
3131

    
3132
      # Only perform this test if a new CPU mask is given
3133
      if constants.HV_CPU_MASK in self.hv_new:
3134
        # Calculate the largest CPU number requested
3135
        max_requested_cpu = max(map(max, cpu_list))
3136
        # Check that all of the instance's nodes have enough physical CPUs to
3137
        # satisfy the requested CPU mask
3138
        hvspecs = [(self.instance.hypervisor,
3139
                    self.cfg.GetClusterInfo()
3140
                      .hvparams[self.instance.hypervisor])]
3141
        _CheckNodesPhysicalCPUs(self, self.instance.all_nodes,
3142
                                max_requested_cpu + 1,
3143
                                hvspecs)
3144

    
3145
    # osparams processing
3146
    if self.op.osparams or self.op.osparams_private:
3147
      public_parms = self.op.osparams or {}
3148
      private_parms = self.op.osparams_private or {}
3149
      dupe_keys = utils.GetRepeatedKeys(public_parms, private_parms)
3150

    
3151
      if dupe_keys:
3152
        raise errors.OpPrereqError("OS parameters repeated multiple times: %s" %
3153
                                   utils.CommaJoin(dupe_keys))
3154

    
3155
      self.os_inst = GetUpdatedParams(self.instance.osparams,
3156
                                      public_parms)
3157
      self.os_inst_private = GetUpdatedParams(self.instance.osparams_private,
3158
                                              private_parms)
3159

    
3160
      CheckOSParams(self, True, node_uuids, instance_os,
3161
                    objects.FillDict(self.os_inst,
3162
                                     self.os_inst_private))
3163

    
3164
    else:
3165
      self.os_inst = {}
3166
      self.os_inst_private = {}
3167

    
3168
    #TODO(dynmem): do the appropriate check involving MINMEM
3169
    if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
3170
        be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
3171
      mem_check_list = [pnode_uuid]
3172
      if be_new[constants.BE_AUTO_BALANCE]:
3173
        # either we changed auto_balance to yes or it was from before
3174
        mem_check_list.extend(self.instance.secondary_nodes)
3175
      instance_info = self.rpc.call_instance_info(
3176
          pnode_uuid, self.instance.name, self.instance.hypervisor,
3177
          cluster_hvparams)
3178
      hvspecs = [(self.instance.hypervisor,
3179
                  cluster_hvparams)]
3180
      nodeinfo = self.rpc.call_node_info(mem_check_list, None,
3181
                                         hvspecs)
3182
      pninfo = nodeinfo[pnode_uuid]
3183
      msg = pninfo.fail_msg
3184
      if msg:
3185
        # Assume the primary node is unreachable and go ahead
3186
        self.warn.append("Can't get info from primary node %s: %s" %
3187
                         (self.cfg.GetNodeName(pnode_uuid), msg))
3188
      else:
3189
        (_, _, (pnhvinfo, )) = pninfo.payload
3190
        if not isinstance(pnhvinfo.get("memory_free", None), int):
3191
          self.warn.append("Node data from primary node %s doesn't contain"
3192
                           " free memory information" %
3193
                           self.cfg.GetNodeName(pnode_uuid))
3194
        elif instance_info.fail_msg:
3195
          self.warn.append("Can't get instance runtime information: %s" %
3196
                           instance_info.fail_msg)
3197
        else:
3198
          if instance_info.payload:
3199
            current_mem = int(instance_info.payload["memory"])
3200
          else:
3201
            # Assume instance not running
3202
            # (there is a slight race condition here, but it's not very
3203
            # probable, and we have no other way to check)
3204
            # TODO: Describe race condition
3205
            current_mem = 0
3206
          #TODO(dynmem): do the appropriate check involving MINMEM
3207
          miss_mem = (be_new[constants.BE_MAXMEM] - current_mem -
3208
                      pnhvinfo["memory_free"])
3209
          if miss_mem > 0:
3210
            raise errors.OpPrereqError("This change will prevent the instance"
3211
                                       " from starting, due to %d MB of memory"
3212
                                       " missing on its primary node" %
3213
                                       miss_mem, errors.ECODE_NORES)
3214

    
3215
      if be_new[constants.BE_AUTO_BALANCE]:
3216
        for node_uuid, nres in nodeinfo.items():
3217
          if node_uuid not in self.instance.secondary_nodes:
3218
            continue
3219
          nres.Raise("Can't get info from secondary node %s" %
3220
                     self.cfg.GetNodeName(node_uuid), prereq=True,
3221
                     ecode=errors.ECODE_STATE)
3222
          (_, _, (nhvinfo, )) = nres.payload
3223
          if not isinstance(nhvinfo.get("memory_free", None), int):
3224
            raise errors.OpPrereqError("Secondary node %s didn't return free"
3225
                                       " memory information" %
3226
                                       self.cfg.GetNodeName(node_uuid),
3227
                                       errors.ECODE_STATE)
3228
          #TODO(dynmem): do the appropriate check involving MINMEM
3229
          elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
3230
            raise errors.OpPrereqError("This change will prevent the instance"
3231
                                       " from failover to its secondary node"
3232
                                       " %s, due to not enough memory" %
3233
                                       self.cfg.GetNodeName(node_uuid),
3234
                                       errors.ECODE_STATE)
3235

    
3236
    if self.op.runtime_mem:
3237
      remote_info = self.rpc.call_instance_info(
3238
         self.instance.primary_node, self.instance.name,
3239
         self.instance.hypervisor,
3240
         cluster_hvparams)
3241
      remote_info.Raise("Error checking node %s" %
3242
                        self.cfg.GetNodeName(self.instance.primary_node))
3243
      if not remote_info.payload: # not running already
3244
        raise errors.OpPrereqError("Instance %s is not running" %
3245
                                   self.instance.name, errors.ECODE_STATE)
3246

    
3247
      current_memory = remote_info.payload["memory"]
3248
      if (not self.op.force and
3249
           (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
3250
            self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
3251
        raise errors.OpPrereqError("Instance %s must have memory between %d"
3252
                                   " and %d MB of memory unless --force is"
3253
                                   " given" %
3254
                                   (self.instance.name,
3255
                                    self.be_proposed[constants.BE_MINMEM],
3256
                                    self.be_proposed[constants.BE_MAXMEM]),
3257
                                   errors.ECODE_INVAL)
3258

    
3259
      delta = self.op.runtime_mem - current_memory
3260
      if delta > 0:
3261
        CheckNodeFreeMemory(
3262
            self, self.instance.primary_node,
3263
            "ballooning memory for instance %s" % self.instance.name, delta,
3264
            self.instance.hypervisor,
3265
            self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])
3266

    
3267
    # make self.cluster visible in the functions below
3268
    cluster = self.cluster
3269

    
3270
    def _PrepareNicCreate(_, params, private):
3271
      self._PrepareNicModification(params, private, None, None,
3272
                                   {}, cluster, pnode_uuid)
3273
      return (None, None)
3274

    
3275
    def _PrepareNicMod(_, nic, params, private):
3276
      self._PrepareNicModification(params, private, nic.ip, nic.network,
3277
                                   nic.nicparams, cluster, pnode_uuid)
3278
      return None
3279

    
3280
    def _PrepareNicRemove(_, params, __):
3281
      ip = params.ip
3282
      net = params.network
3283
      if net is not None and ip is not None:
3284
        self.cfg.ReleaseIp(net, ip, self.proc.GetECId())
3285

    
3286
    # Verify NIC changes (operating on copy)
3287
    nics = [nic.Copy() for nic in self.instance.nics]
3288
    _ApplyContainerMods("NIC", nics, None, self.nicmod,
3289
                        _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
3290
    if len(nics) > constants.MAX_NICS:
3291
      raise errors.OpPrereqError("Instance has too many network interfaces"
3292
                                 " (%d), cannot add more" % constants.MAX_NICS,
3293
                                 errors.ECODE_STATE)
3294

    
3295
    # Pre-compute NIC changes (necessary to use result in hooks)
3296
    self._nic_chgdesc = []
3297
    if self.nicmod:
3298
      # Operate on copies as this is still in prereq
3299
      nics = [nic.Copy() for nic in self.instance.nics]
3300
      _ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
3301
                          self._CreateNewNic, self._ApplyNicMods,
3302
                          self._RemoveNic)
3303
      # Verify that NIC names are unique and valid
3304
      utils.ValidateDeviceNames("NIC", nics)
3305
      self._new_nics = nics
3306
      ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
3307
    else:
3308
      self._new_nics = None
3309
      ispec[constants.ISPEC_NIC_COUNT] = len(self.instance.nics)
3310

    
3311
    if not self.op.ignore_ipolicy:
3312
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
3313
                                                              group_info)
3314

    
3315
      # Fill ispec with backend parameters
3316
      ispec[constants.ISPEC_SPINDLE_USE] = \
3317
        self.be_new.get(constants.BE_SPINDLE_USE, None)
3318
      ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS,
3319
                                                         None)
3320

    
3321
      # Copy ispec to verify parameters with min/max values separately
3322
      if self.op.disk_template:
3323
        new_disk_template = self.op.disk_template
3324
      else:
3325
        new_disk_template = self.instance.disk_template
3326
      ispec_max = ispec.copy()
3327
      ispec_max[constants.ISPEC_MEM_SIZE] = \
3328
        self.be_new.get(constants.BE_MAXMEM, None)
3329
      res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,
3330
                                                     new_disk_template)
3331
      ispec_min = ispec.copy()
3332
      ispec_min[constants.ISPEC_MEM_SIZE] = \
3333
        self.be_new.get(constants.BE_MINMEM, None)
3334
      res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,
3335
                                                     new_disk_template)
3336

    
3337
      if (res_max or res_min):
3338
        # FIXME: Improve error message by including information about whether
3339
        # the upper or lower limit of the parameter fails the ipolicy.
3340
        msg = ("Instance allocation to group %s (%s) violates policy: %s" %
3341
               (group_info, group_info.name,
3342
                utils.CommaJoin(set(res_max + res_min))))
3343
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
3344

    
3345
  def _ConvertPlainToDrbd(self, feedback_fn):
3346
    """Converts an instance from plain to drbd.
3347

3348
    """
3349
    feedback_fn("Converting template to drbd")
3350
    pnode_uuid = self.instance.primary_node
3351
    snode_uuid = self.op.remote_node_uuid
3352

    
3353
    assert self.instance.disk_template == constants.DT_PLAIN
3354

    
3355
    # create a fake disk info for _GenerateDiskTemplate
3356
    disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
3357
                  constants.IDISK_VG: d.logical_id[0],
3358
                  constants.IDISK_NAME: d.name}
3359
                 for d in self.instance.disks]
3360
    new_disks = GenerateDiskTemplate(self, self.op.disk_template,
3361
                                     self.instance.uuid, pnode_uuid,
3362
                                     [snode_uuid], disk_info, None, None, 0,
3363
                                     feedback_fn, self.diskparams)
3364
    anno_disks = rpc.AnnotateDiskParams(new_disks, self.diskparams)
3365
    p_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, pnode_uuid)
3366
    s_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, snode_uuid)
3367
    info = GetInstanceInfoText(self.instance)
3368
    feedback_fn("Creating additional volumes...")
3369
    # first, create the missing data and meta devices
3370
    for disk in anno_disks:
3371
      # unfortunately this is... not too nice
3372
      CreateSingleBlockDev(self, pnode_uuid, self.instance, disk.children[1],
3373
                           info, True, p_excl_stor)
3374
      for child in disk.children:
3375
        CreateSingleBlockDev(self, snode_uuid, self.instance, child, info, True,
3376
                             s_excl_stor)
3377
    # at this stage, all new LVs have been created, we can rename the
3378
    # old ones
3379
    feedback_fn("Renaming original volumes...")
3380
    rename_list = [(o, n.children[0].logical_id)
3381
                   for (o, n) in zip(self.instance.disks, new_disks)]
3382
    result = self.rpc.call_blockdev_rename(pnode_uuid, rename_list)
3383
    result.Raise("Failed to rename original LVs")
3384

    
3385
    feedback_fn("Initializing DRBD devices...")
3386
    # all child devices are in place, we can now create the DRBD devices
3387
    try:
3388
      for disk in anno_disks:
3389
        for (node_uuid, excl_stor) in [(pnode_uuid, p_excl_stor),
3390
                                       (snode_uuid, s_excl_stor)]:
3391
          f_create = node_uuid == pnode_uuid
3392
          CreateSingleBlockDev(self, node_uuid, self.instance, disk, info,
3393
                               f_create, excl_stor)
3394
    except errors.GenericError, e:
3395
      feedback_fn("Initializing of DRBD devices failed;"
3396
                  " renaming back original volumes...")
3397
      rename_back_list = [(n.children[0], o.logical_id)
3398
                          for (n, o) in zip(new_disks, self.instance.disks)]
3399
      result = self.rpc.call_blockdev_rename(pnode_uuid, rename_back_list)
3400
      result.Raise("Failed to rename LVs back after error %s" % str(e))
3401
      raise
3402

    
3403
    # at this point, the instance has been modified
3404
    self.instance.disk_template = constants.DT_DRBD8
3405
    self.instance.disks = new_disks
3406
    self.cfg.Update(self.instance, feedback_fn)
3407

    
3408
    # Release node locks while waiting for sync
3409
    ReleaseLocks(self, locking.LEVEL_NODE)
3410

    
3411
    # disks are created, waiting for sync
3412
    disk_abort = not WaitForSync(self, self.instance,
3413
                                 oneshot=not self.op.wait_for_sync)
3414
    if disk_abort:
3415
      raise errors.OpExecError("There are some degraded disks for"
3416
                               " this instance, please cleanup manually")
3417

    
3418
    # Node resource locks will be released by caller
3419

    
3420
  def _ConvertDrbdToPlain(self, feedback_fn):
3421
    """Converts an instance from drbd to plain.
3422

3423
    """
3424
    assert len(self.instance.secondary_nodes) == 1
3425
    assert self.instance.disk_template == constants.DT_DRBD8
3426

    
3427
    pnode_uuid = self.instance.primary_node
3428
    snode_uuid = self.instance.secondary_nodes[0]
3429
    feedback_fn("Converting template to plain")
3430

    
3431
    old_disks = AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
3432
    new_disks = [d.children[0] for d in self.instance.disks]
3433

    
3434
    # copy over size, mode and name
3435
    for parent, child in zip(old_disks, new_disks):
3436
      child.size = parent.size
3437
      child.mode = parent.mode
3438
      child.name = parent.name
3439

    
3440
    # this is a DRBD disk, return its port to the pool
3441
    # NOTE: this must be done right before the call to cfg.Update!
3442
    for disk in old_disks:
3443
      tcp_port = disk.logical_id[2]
3444
      self.cfg.AddTcpUdpPort(tcp_port)
3445

    
3446
    # update instance structure
3447
    self.instance.disks = new_disks
3448
    self.instance.disk_template = constants.DT_PLAIN
3449
    _UpdateIvNames(0, self.instance.disks)
3450
    self.cfg.Update(self.instance, feedback_fn)
3451

    
3452
    # Release locks in case removing disks takes a while
3453
    ReleaseLocks(self, locking.LEVEL_NODE)
3454

    
3455
    feedback_fn("Removing volumes on the secondary node...")
3456
    for disk in old_disks:
3457
      result = self.rpc.call_blockdev_remove(snode_uuid, (disk, self.instance))
3458
      result.Warn("Could not remove block device %s on node %s,"
3459
                  " continuing anyway" %
3460
                  (disk.iv_name, self.cfg.GetNodeName(snode_uuid)),
3461
                  self.LogWarning)
3462

    
3463
    feedback_fn("Removing unneeded volumes on the primary node...")
3464
    for idx, disk in enumerate(old_disks):
3465
      meta = disk.children[1]
3466
      result = self.rpc.call_blockdev_remove(pnode_uuid, (meta, self.instance))
3467
      result.Warn("Could not remove metadata for disk %d on node %s,"
3468
                  " continuing anyway" %
3469
                  (idx, self.cfg.GetNodeName(pnode_uuid)),
3470
                  self.LogWarning)
3471

    
3472
  def _HotplugDevice(self, action, dev_type, device, extra, seq):
3473
    self.LogInfo("Trying to hotplug device...")
3474
    msg = "hotplug:"
3475
    result = self.rpc.call_hotplug_device(self.instance.primary_node,
3476
                                          self.instance, action, dev_type,
3477
                                          (device, self.instance),
3478
                                          extra, seq)
3479
    if result.fail_msg:
3480
      self.LogWarning("Could not hotplug device: %s" % result.fail_msg)
3481
      self.LogInfo("Continuing execution..")
3482
      msg += "failed"
3483
    else:
3484
      self.LogInfo("Hotplug done.")
3485
      msg += "done"
3486
    return msg
3487

    
3488
  def _CreateNewDisk(self, idx, params, _):
3489
    """Creates a new disk.
3490

3491
    """
3492
    # add a new disk
3493
    if self.instance.disk_template in constants.DTS_FILEBASED:
3494
      (file_driver, file_path) = self.instance.disks[0].logical_id
3495
      file_path = os.path.dirname(file_path)
3496
    else:
3497
      file_driver = file_path = None
3498

    
3499
    disk = \
3500
      GenerateDiskTemplate(self, self.instance.disk_template,
3501
                           self.instance.uuid, self.instance.primary_node,
3502
                           self.instance.secondary_nodes, [params], file_path,
3503
                           file_driver, idx, self.Log, self.diskparams)[0]
3504

    
3505
    new_disks = CreateDisks(self, self.instance, disks=[disk])
3506

    
3507
    if self.cluster.prealloc_wipe_disks:
3508
      # Wipe new disk
3509
      WipeOrCleanupDisks(self, self.instance,
3510
                         disks=[(idx, disk, 0)],
3511
                         cleanup=new_disks)
3512

    
3513
    changes = [
3514
      ("disk/%d" % idx,
3515
       "add:size=%s,mode=%s" % (disk.size, disk.mode)),
3516
      ]
3517
    if self.op.hotplug:
3518
      result = self.rpc.call_blockdev_assemble(self.instance.primary_node,
3519
                                               (disk, self.instance),
3520
                                               self.instance.name, True, idx)
3521
      if result.fail_msg:
3522
        changes.append(("disk/%d" % idx, "assemble:failed"))
3523
        self.LogWarning("Can't assemble newly created disk %d: %s",
3524
                        idx, result.fail_msg)
3525
      else:
3526
        _, link_name = result.payload
3527
        msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
3528
                                  constants.HOTPLUG_TARGET_DISK,
3529
                                  disk, link_name, idx)
3530
        changes.append(("disk/%d" % idx, msg))
3531

    
3532
    return (disk, changes)
3533

    
3534
  def _PostAddDisk(self, _, disk):
3535
    if not WaitForSync(self, self.instance, disks=[disk],
3536
                       oneshot=not self.op.wait_for_sync):
3537
      raise errors.OpExecError("Failed to sync disks of %s" %
3538
                               self.instance.name)
3539

    
3540
    # the disk is active at this point, so deactivate it if the instance disks
3541
    # are supposed to be inactive
3542
    if not self.instance.disks_active:
3543
      ShutdownInstanceDisks(self, self.instance, disks=[disk])
3544

    
3545
  def _ModifyDisk(self, idx, disk, params, _):
3546
    """Modifies a disk.
3547

3548
    """
3549
    changes = []
3550
    if constants.IDISK_MODE in params:
3551
      disk.mode = params.get(constants.IDISK_MODE)
3552
      changes.append(("disk.mode/%d" % idx, disk.mode))
3553

    
3554
    if constants.IDISK_NAME in params:
3555
      disk.name = params.get(constants.IDISK_NAME)
3556
      changes.append(("disk.name/%d" % idx, disk.name))
3557

    
3558
    # Modify arbitrary params in case instance template is ext
3559
    for key, value in params.iteritems():
3560
      if (key not in constants.MODIFIABLE_IDISK_PARAMS and
3561
          self.instance.disk_template == constants.DT_EXT):
3562
        # stolen from GetUpdatedParams: default means reset/delete
3563
        if value.lower() == constants.VALUE_DEFAULT:
3564
          try:
3565
            del disk.params[key]
3566
          except KeyError:
3567
            pass
3568
        else:
3569
          disk.params[key] = value
3570
        changes.append(("disk.params:%s/%d" % (key, idx), value))
3571

    
3572
    return changes
3573

    
3574
  def _RemoveDisk(self, idx, root, _):
3575
    """Removes a disk.
3576

3577
    """
3578
    hotmsg = ""
3579
    if self.op.hotplug:
3580
      hotmsg = self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
3581
                                   constants.HOTPLUG_TARGET_DISK,
3582
                                   root, None, idx)
3583
      ShutdownInstanceDisks(self, self.instance, [root])
3584

    
3585
    (anno_disk,) = AnnotateDiskParams(self.instance, [root], self.cfg)
3586
    for node_uuid, disk in anno_disk.ComputeNodeTree(
3587
                             self.instance.primary_node):
3588
      msg = self.rpc.call_blockdev_remove(node_uuid, (disk, self.instance)) \
3589
              .fail_msg
3590
      if msg:
3591
        self.LogWarning("Could not remove disk/%d on node '%s': %s,"
3592
                        " continuing anyway", idx,
3593
                        self.cfg.GetNodeName(node_uuid), msg)
3594

    
3595
    # if this is a DRBD disk, return its port to the pool
3596
    if root.dev_type in constants.DTS_DRBD:
3597
      self.cfg.AddTcpUdpPort(root.logical_id[2])
3598

    
3599
    return hotmsg
3600

    
3601
  def _CreateNewNic(self, idx, params, private):
3602
    """Creates data structure for a new network interface.
3603

3604
    """
3605
    mac = params[constants.INIC_MAC]
3606
    ip = params.get(constants.INIC_IP, None)
3607
    net = params.get(constants.INIC_NETWORK, None)
3608
    name = params.get(constants.INIC_NAME, None)
3609
    net_uuid = self.cfg.LookupNetwork(net)
3610
    #TODO: not private.filled?? can a nic have no nicparams??
3611
    nicparams = private.filled
3612
    nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name,
3613
                       nicparams=nicparams)
3614
    nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
3615

    
3616
    changes = [
3617
      ("nic.%d" % idx,
3618
       "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
3619
       (mac, ip, private.filled[constants.NIC_MODE],
3620
       private.filled[constants.NIC_LINK], net)),
3621
      ]
3622

    
3623
    if self.op.hotplug:
3624
      msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
3625
                                constants.HOTPLUG_TARGET_NIC,
3626
                                nobj, None, idx)
3627
      changes.append(("nic.%d" % idx, msg))
3628

    
3629
    return (nobj, changes)
3630

    
3631
  def _ApplyNicMods(self, idx, nic, params, private):
3632
    """Modifies a network interface.
3633

3634
    """
3635
    changes = []
3636

    
3637
    for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:
3638
      if key in params:
3639
        changes.append(("nic.%s/%d" % (key, idx), params[key]))
3640
        setattr(nic, key, params[key])
3641

    
3642
    new_net = params.get(constants.INIC_NETWORK, nic.network)
3643
    new_net_uuid = self.cfg.LookupNetwork(new_net)
3644
    if new_net_uuid != nic.network:
3645
      changes.append(("nic.network/%d" % idx, new_net))
3646
      nic.network = new_net_uuid
3647

    
3648
    if private.filled:
3649
      nic.nicparams = private.filled
3650

    
3651
      for (key, val) in nic.nicparams.items():
3652
        changes.append(("nic.%s/%d" % (key, idx), val))
3653

    
3654
    if self.op.hotplug:
3655
      msg = self._HotplugDevice(constants.HOTPLUG_ACTION_MODIFY,
3656
                                constants.HOTPLUG_TARGET_NIC,
3657
                                nic, None, idx)
3658
      changes.append(("nic/%d" % idx, msg))
3659

    
3660
    return changes
3661

    
3662
  def _RemoveNic(self, idx, nic, _):
3663
    if self.op.hotplug:
3664
      return self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
3665
                                 constants.HOTPLUG_TARGET_NIC,
3666
                                 nic, None, idx)
3667

    
3668
  def Exec(self, feedback_fn):
3669
    """Modifies an instance.
3670

3671
    All parameters take effect only at the next restart of the instance.
3672

3673
    """
3674
    # Process here the warnings from CheckPrereq, as we don't have a
3675
    # feedback_fn there.
3676
    # TODO: Replace with self.LogWarning
3677
    for warn in self.warn:
3678
      feedback_fn("WARNING: %s" % warn)
3679

    
3680
    assert ((self.op.disk_template is None) ^
3681
            bool(self.owned_locks(locking.LEVEL_NODE_RES))), \
3682
      "Not owning any node resource locks"
3683

    
3684
    result = []
3685

    
3686
    # New primary node
    if self.op.pnode_uuid:
      self.instance.primary_node = self.op.pnode_uuid

    # runtime memory
    if self.op.runtime_mem:
      rpcres = self.rpc.call_instance_balloon_memory(self.instance.primary_node,
                                                     self.instance,
                                                     self.op.runtime_mem)
      rpcres.Raise("Cannot modify instance runtime memory")
      result.append(("runtime_memory", self.op.runtime_mem))

    # Apply disk changes
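    # _ApplyContainerMods dispatches each queued disk modification to the
    # corresponding add/modify/remove callback and records the change
    # descriptions in 'result'; _UpdateIvNames then renumbers the disks'
    # iv_names.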
    _ApplyContainerMods("disk", self.instance.disks, result, self.diskmod,
3700
                        self._CreateNewDisk, self._ModifyDisk,
3701
                        self._RemoveDisk, post_add_fn=self._PostAddDisk)
3702
    _UpdateIvNames(0, self.instance.disks)
3703

    
3704
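    # Convert the disk template if requested; the instance's disks must be
    # shut down first, and a failed conversion releases any DRBD minors
    # reserved for it.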
    if self.op.disk_template:
      if __debug__:
        check_nodes = set(self.instance.all_nodes)
        if self.op.remote_node_uuid:
          check_nodes.add(self.op.remote_node_uuid)
        for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
          owned = self.owned_locks(level)
          assert not (check_nodes - owned), \
            ("Not owning the correct locks, owning %r, expected at least %r" %
             (owned, check_nodes))

      r_shut = ShutdownInstanceDisks(self, self.instance)
      if not r_shut:
        raise errors.OpExecError("Cannot shutdown instance disks, unable to"
                                 " proceed with disk template conversion")
      mode = (self.instance.disk_template, self.op.disk_template)
      try:
        self._DISK_CONVERSIONS[mode](self, feedback_fn)
      except:
        self.cfg.ReleaseDRBDMinors(self.instance.uuid)
        raise
      result.append(("disk_template", self.op.disk_template))

      assert self.instance.disk_template == self.op.disk_template, \
        ("Expected disk template '%s', found '%s'" %
         (self.op.disk_template, self.instance.disk_template))

    # Release node and resource locks if there are any (they might already have
    # been released during disk conversion)
    ReleaseLocks(self, locking.LEVEL_NODE)
    ReleaseLocks(self, locking.LEVEL_NODE_RES)

    # Apply NIC changes
    if self._new_nics is not None:
      self.instance.nics = self._new_nics
      result.extend(self._nic_chgdesc)

    # hvparams changes
    if self.op.hvparams:
      self.instance.hvparams = self.hv_inst
      for key, val in self.op.hvparams.iteritems():
        result.append(("hv/%s" % key, val))

    # beparams changes
    if self.op.beparams:
      self.instance.beparams = self.be_inst
      for key, val in self.op.beparams.iteritems():
        result.append(("be/%s" % key, val))

    # OS change
    if self.op.os_name:
      self.instance.os = self.op.os_name

    # osparams changes
    if self.op.osparams:
      self.instance.osparams = self.os_inst
      for key, val in self.op.osparams.iteritems():
        result.append(("os/%s" % key, val))

    if self.op.osparams_private:
      self.instance.osparams_private = self.os_inst_private
      for key, val in self.op.osparams_private.iteritems():
        # Show the Private(...) blurb.
        result.append(("os_private/%s" % key, repr(val)))

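    # self.op.offline is a ternary switch: None leaves the admin state
    # untouched, True marks the instance offline, False marks it online but
    # administratively down (stopped).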
    if self.op.offline is None:
      # Ignore
      pass
    elif self.op.offline:
      # Mark instance as offline
      self.cfg.MarkInstanceOffline(self.instance.uuid)
      result.append(("admin_state", constants.ADMINST_OFFLINE))
    else:
      # Mark instance as online, but stopped
      self.cfg.MarkInstanceDown(self.instance.uuid)
      result.append(("admin_state", constants.ADMINST_DOWN))

    self.cfg.Update(self.instance, feedback_fn, self.proc.GetECId())

    assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
                self.owned_locks(locking.LEVEL_NODE)), \
      "All node locks should have been released by now"

    return result

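  # Supported disk template conversions, keyed by (current, new) template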
  _DISK_CONVERSIONS = {
    (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
    (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
    }


class LUInstanceChangeGroup(LogicalUnit):
  """Moves an instance to another node group.

  """
  HPATH = "instance-change-group"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self.share_locks = ShareAll()

    self.needed_locks = {
      locking.LEVEL_NODEGROUP: [],
      locking.LEVEL_NODE: [],
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
      }

    self._ExpandAndLockInstance()

    if self.op.target_groups:
      self.req_target_uuids = map(self.cfg.LookupNodeGroup,
                                  self.op.target_groups)
    else:
      self.req_target_uuids = None

    self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]

      if self.req_target_uuids:
        lock_groups = set(self.req_target_uuids)

        # Lock all groups used by instance optimistically; this requires going
        # via the node before it's locked, requiring verification later on
        instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
        lock_groups.update(instance_groups)
      else:
        # No target groups, need to lock all of them
        lock_groups = locking.ALL_SET

      self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups

    elif level == locking.LEVEL_NODE:
      if self.req_target_uuids:
        # Lock all nodes used by instances
        self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
        self._LockInstancesNodes()

        # Lock all nodes in all potential target groups
        lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
                       self.cfg.GetInstanceNodeGroups(self.op.instance_uuid))
        member_nodes = [node_uuid
                        for group in lock_groups
                        for node_uuid in self.cfg.GetNodeGroup(group).members]
        self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
      else:
        # Lock all nodes as all groups are potential targets
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    owned_instance_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))

    assert (self.req_target_uuids is None or
            owned_groups.issuperset(self.req_target_uuids))
    assert owned_instance_names == set([self.op.instance_name])

    # Get instance information
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)

    # Check if node groups for locked instance are still correct
    assert owned_nodes.issuperset(self.instance.all_nodes), \
      ("Instance %s's nodes changed while we kept the lock" %
       self.op.instance_name)

    inst_groups = CheckInstanceNodeGroups(self.cfg, self.op.instance_uuid,
                                          owned_groups)

    if self.req_target_uuids:
      # User requested specific target groups
      self.target_uuids = frozenset(self.req_target_uuids)
    else:
      # All groups except those used by the instance are potential targets
      self.target_uuids = owned_groups - inst_groups

    conflicting_groups = self.target_uuids & inst_groups
    if conflicting_groups:
      raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
                                 " used by the instance '%s'" %
                                 (utils.CommaJoin(conflicting_groups),
                                  self.op.instance_name),
                                 errors.ECODE_INVAL)

    if not self.target_uuids:
      raise errors.OpPrereqError("There are no possible target groups",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    assert self.target_uuids

    env = {
      "TARGET_GROUPS": " ".join(self.target_uuids),
      }

    env.update(BuildInstanceHookEnvByObject(self, self.instance))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()
    return ([mn], [mn])

  def Exec(self, feedback_fn):
    instances = list(self.owned_locks(locking.LEVEL_INSTANCE))

    assert instances == [self.op.instance_name], "Instance not locked"

    req = iallocator.IAReqGroupChange(instances=instances,
                                      target_groups=list(self.target_uuids))
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute solution for changing group of"
                                 " instance '%s' using iallocator '%s': %s" %
                                 (self.op.instance_name, self.op.iallocator,
                                  ial.info), errors.ECODE_NORES)

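    # The iallocator returns a node-evacuation style result; turn it into job
    # definitions and hand them back to the job queue via ResultWithJobs.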
    jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False)

    self.LogInfo("Iallocator returned %s job(s) for changing group of"
                 " instance '%s'", len(jobs), self.op.instance_name)

    return ResultWithJobs(jobs)