#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Logical units dealing with instances."""

import OpenSSL
import copy
import logging
import os

from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import ht
from ganeti import hypervisor
from ganeti import locking
from ganeti.masterd import iallocator
from ganeti import masterd
from ganeti import netutils
from ganeti import objects
from ganeti import pathutils
from ganeti import serializer
import ganeti.rpc.node as rpc
from ganeti import utils

from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs

from ganeti.cmdlib.common import INSTANCE_DOWN, \
  INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, CheckNodeOnline, \
  ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
  LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
  IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, CheckOSImage, \
  AnnotateDiskParams, GetUpdatedParams, ExpandInstanceUuidAndName, \
  ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeUuidAndName, \
  CheckDiskTemplateEnabled, IsValidDiskAccessModeCombination
from ganeti.cmdlib.instance_storage import CreateDisks, \
  CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, ImageDisks, \
  WaitForSync, IsExclusiveStorageEnabledNodeUuid, CreateSingleBlockDev, \
  ComputeDisks, CheckRADOSFreeSpace, ComputeDiskSizePerVG, \
  GenerateDiskTemplate, StartInstanceDisks, ShutdownInstanceDisks, \
  AssembleInstanceDisks, CheckSpindlesExclusiveStorage
from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
  GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
  NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
  ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
  GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
  CheckInstanceBridgesExist, CheckNicsBridgesExist, CheckNodeHasOS

import ganeti.masterd.instance


#: Type description for changes as returned by L{_ApplyContainerMods}'s
#: callbacks
_TApplyContModsCbChanges = \
  ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
    ht.TNonEmptyString,
    ht.TAny,
    ])))


def _CheckHostnameSane(lu, name):
  """Ensures that a given hostname resolves to a 'sane' name.

  The given name is required to be a prefix of the resolved hostname,
  to prevent accidental mismatches.

  @param lu: the logical unit on behalf of which we're checking
  @param name: the name we should resolve and check
  @return: the resolved hostname object

  """
  hostname = netutils.GetHostname(name=name)
  if hostname.name != name:
    lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name)
  if not utils.MatchNameComponent(name, [hostname.name]):
    raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
                                " same as given hostname '%s'") %
                               (hostname.name, name), errors.ECODE_INVAL)
  return hostname
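
# Illustrative note (added for clarity; not part of the original module): with
# the prefix check above, a request for "instance1" that resolves to
# "instance1.example.com" is accepted, while a resolution to
# "mail.example.com" raises OpPrereqError with ECODE_INVAL.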


def _CheckOpportunisticLocking(op):
  """Generate error if opportunistic locking is not possible.

  """
  if op.opportunistic_locking and not op.iallocator:
    raise errors.OpPrereqError("Opportunistic locking is only available in"
                               " combination with an instance allocator",
                               errors.ECODE_INVAL)


def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_name_whitelist):
  """Wrapper around IAReqInstanceAlloc.

  @param op: The instance opcode
  @param disks: The computed disks
  @param nics: The computed nics
  @param beparams: The full filled beparams
  @param node_name_whitelist: List of nodes which should appear as online to the
    allocator (unless the node is already marked offline)

  @returns: A filled L{iallocator.IAReqInstanceAlloc}

  """
  spindle_use = beparams[constants.BE_SPINDLE_USE]
  return iallocator.IAReqInstanceAlloc(name=op.instance_name,
                                       disk_template=op.disk_template,
                                       tags=op.tags,
                                       os=op.os_type,
                                       vcpus=beparams[constants.BE_VCPUS],
                                       memory=beparams[constants.BE_MAXMEM],
                                       spindle_use=spindle_use,
                                       disks=disks,
                                       nics=[n.ToDict() for n in nics],
                                       hypervisor=op.hypervisor,
                                       node_whitelist=node_name_whitelist)


def _ComputeFullBeParams(op, cluster):
  """Computes the full beparams.

  @param op: The instance opcode
  @param cluster: The cluster config object

  @return: The fully filled beparams

  """
  default_beparams = cluster.beparams[constants.PP_DEFAULT]
  for param, value in op.beparams.iteritems():
    if value == constants.VALUE_AUTO:
      op.beparams[param] = default_beparams[param]
  objects.UpgradeBeParams(op.beparams)
  utils.ForceDictType(op.beparams, constants.BES_PARAMETER_TYPES)
  return cluster.SimpleFillBE(op.beparams)
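
# Illustrative note (added; not in the original module): a beparams dict such
# as {"vcpus": "auto"} has the "auto" value replaced by the cluster default in
# _ComputeFullBeParams above, after which SimpleFillBE merges in the remaining
# cluster-level defaults.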


def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
  """Computes the nics.

  @param op: The instance opcode
  @param cluster: Cluster configuration object
  @param default_ip: The default ip to assign
  @param cfg: An instance of the configuration object
  @param ec_id: Execution context ID

  @returns: The build up nics

  """
  nics = []
  for nic in op.nics:
    nic_mode_req = nic.get(constants.INIC_MODE, None)
    nic_mode = nic_mode_req
    if nic_mode is None or nic_mode == constants.VALUE_AUTO:
      nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]

    net = nic.get(constants.INIC_NETWORK, None)
    link = nic.get(constants.NIC_LINK, None)
    ip = nic.get(constants.INIC_IP, None)
    vlan = nic.get(constants.INIC_VLAN, None)

    if net is None or net.lower() == constants.VALUE_NONE:
      net = None
    else:
      if nic_mode_req is not None or link is not None:
        raise errors.OpPrereqError("If network is given, no mode or link"
                                   " is allowed to be passed",
                                   errors.ECODE_INVAL)

    # ip validity checks
    if ip is None or ip.lower() == constants.VALUE_NONE:
      nic_ip = None
    elif ip.lower() == constants.VALUE_AUTO:
      if not op.name_check:
        raise errors.OpPrereqError("IP address set to auto but name checks"
                                   " have been skipped",
                                   errors.ECODE_INVAL)
      nic_ip = default_ip
    else:
      # We defer pool operations until later, so that the iallocator has
      # filled in the instance's node(s)
      if ip.lower() == constants.NIC_IP_POOL:
        if net is None:
          raise errors.OpPrereqError("if ip=pool, parameter network"
                                     " must be passed too",
                                     errors.ECODE_INVAL)

      elif not netutils.IPAddress.IsValid(ip):
        raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                   errors.ECODE_INVAL)

      nic_ip = ip

    # TODO: check the ip address for uniqueness
    if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
      raise errors.OpPrereqError("Routed nic mode requires an ip address",
                                 errors.ECODE_INVAL)

    # MAC address verification
    mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
    if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
      mac = utils.NormalizeAndValidateMac(mac)

      try:
        # TODO: We need to factor this out
        cfg.ReserveMAC(mac, ec_id)
      except errors.ReservationError:
        raise errors.OpPrereqError("MAC address %s already in use"
                                   " in cluster" % mac,
                                   errors.ECODE_NOTUNIQUE)

    #  Build nic parameters
    nicparams = {}
    if nic_mode_req:
      nicparams[constants.NIC_MODE] = nic_mode
    if link:
      nicparams[constants.NIC_LINK] = link
    if vlan:
      nicparams[constants.NIC_VLAN] = vlan

    check_params = cluster.SimpleFillNIC(nicparams)
    objects.NIC.CheckParameterSyntax(check_params)
    net_uuid = cfg.LookupNetwork(net)
    name = nic.get(constants.INIC_NAME, None)
    if name is not None and name.lower() == constants.VALUE_NONE:
      name = None
    nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name,
                          network=net_uuid, nicparams=nicparams)
    nic_obj.uuid = cfg.GenerateUniqueID(ec_id)
    nics.append(nic_obj)

  return nics


def _CheckForConflictingIp(lu, ip, node_uuid):
  """In case of conflicting IP address raise error.

  @type ip: string
  @param ip: IP address
  @type node_uuid: string
  @param node_uuid: node UUID

  """
  (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node_uuid)
  if conf_net is not None:
    raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
                                " network %s, but the target NIC does not." %
                                (ip, conf_net)),
                               errors.ECODE_STATE)

  return (None, None)


def _ComputeIPolicyInstanceSpecViolation(
  ipolicy, instance_spec, disk_template,
  _compute_fn=ComputeIPolicySpecViolation):
  """Compute if instance specs meet the specs of ipolicy.

  @type ipolicy: dict
  @param ipolicy: The ipolicy to verify against
  @type instance_spec: dict
  @param instance_spec: The instance spec to verify
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{ComputeIPolicySpecViolation}

  """
  mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
  cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
  disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
  disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
  nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
  spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)

  return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
                     disk_sizes, spindle_use, disk_template)
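
# Illustrative note (added for clarity): the helper above simply unpacks an
# instance spec dict such as {ISPEC_MEM_SIZE: 512, ISPEC_CPU_COUNT: 1,
# ISPEC_DISK_COUNT: 1, ISPEC_DISK_SIZE: [1024], ISPEC_NIC_COUNT: 1,
# ISPEC_SPINDLE_USE: 1} and forwards the values, together with the disk
# template, to ComputeIPolicySpecViolation.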


def _ComputeInstanceCommunicationNIC(instance_name):
  """Compute the name of the instance NIC used by instance
  communication.

  With instance communication, a new NIC is added to the instance.
  This NIC has a special name that identifies it as being part of
  instance communication, and not just a normal NIC.  This function
  generates the name of the NIC based on a prefix and the instance
  name.

  @type instance_name: string
  @param instance_name: name of the instance the NIC belongs to

  @rtype: string
  @return: name of the NIC

  """
  return constants.INSTANCE_COMMUNICATION_NIC_PREFIX + instance_name
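
# Illustrative note (added; not part of the original source): for an instance
# named "inst1.example.com" the returned NIC name is
# constants.INSTANCE_COMMUNICATION_NIC_PREFIX + "inst1.example.com".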


class LUInstanceCreate(LogicalUnit):
  """Create an instance.

  """
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def _CheckDiskTemplateValid(self):
    """Checks validity of disk template.

    """
    cluster = self.cfg.GetClusterInfo()
    if self.op.disk_template is None:
      # FIXME: It would be better to take the default disk template from the
      # ipolicy, but for the ipolicy we need the primary node, which we get from
      # the iallocator, which wants the disk template as input. To solve this
      # chicken-and-egg problem, it should be possible to specify just a node
      # group from the iallocator and take the ipolicy from that.
      self.op.disk_template = cluster.enabled_disk_templates[0]
    CheckDiskTemplateEnabled(cluster, self.op.disk_template)

  def _CheckDiskArguments(self):
    """Checks validity of disk-related arguments.

    """
    # check that disk's names are unique and valid
    utils.ValidateDeviceNames("disk", self.op.disks)

    self._CheckDiskTemplateValid()

    # check disks' parameter names and consistent adopt/no-adopt strategy
    has_adopt = has_no_adopt = False
    for disk in self.op.disks:
      if self.op.disk_template != constants.DT_EXT:
        utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
      if constants.IDISK_ADOPT in disk:
        has_adopt = True
      else:
        has_no_adopt = True
    if has_adopt and has_no_adopt:
      raise errors.OpPrereqError("Either all disks are adopted or none is",
                                 errors.ECODE_INVAL)
    if has_adopt:
      if self.op.disk_template not in constants.DTS_MAY_ADOPT:
        raise errors.OpPrereqError("Disk adoption is not supported for the"
                                   " '%s' disk template" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)
      if self.op.iallocator is not None:
        raise errors.OpPrereqError("Disk adoption not allowed with an"
                                   " iallocator script", errors.ECODE_INVAL)
      if self.op.mode == constants.INSTANCE_IMPORT:
        raise errors.OpPrereqError("Disk adoption not allowed for"
                                   " instance import", errors.ECODE_INVAL)
    else:
      if self.op.disk_template in constants.DTS_MUST_ADOPT:
        raise errors.OpPrereqError("Disk template %s requires disk adoption,"
                                   " but no 'adopt' parameter given" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)

    self.adopt_disks = has_adopt

  def _CheckVLANArguments(self):
    """ Check validity of VLANs if given

    """
    for nic in self.op.nics:
      vlan = nic.get(constants.INIC_VLAN, None)
      if vlan:
        if vlan[0] == ".":
          # vlan starting with dot means single untagged vlan,
          # might be followed by trunk (:)
          if not vlan[1:].isdigit():
            vlanlist = vlan[1:].split(':')
            for vl in vlanlist:
              if not vl.isdigit():
                raise errors.OpPrereqError("Specified VLAN parameter is "
                                           "invalid : %s" % vlan,
                                             errors.ECODE_INVAL)
        elif vlan[0] == ":":
          # Trunk - tagged only
          vlanlist = vlan[1:].split(':')
          for vl in vlanlist:
            if not vl.isdigit():
              raise errors.OpPrereqError("Specified VLAN parameter is invalid"
                                           " : %s" % vlan, errors.ECODE_INVAL)
        elif vlan.isdigit():
          # This is the simplest case. No dots, only single digit
          # -> Create untagged access port, dot needs to be added
          nic[constants.INIC_VLAN] = "." + vlan
        else:
          raise errors.OpPrereqError("Specified VLAN parameter is invalid"
                                       " : %s" % vlan, errors.ECODE_INVAL)

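  # Illustrative note (added for clarity): per the checks above a VLAN value
  # may be a plain digit string ("100", normalized to ".100"), an untagged
  # VLAN with an optional trunk part (".100" or ".100:200"), or a tagged-only
  # trunk (":100:200"); anything else raises OpPrereqError.
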
  def CheckArguments(self):
    """Check arguments.

    """
    # do not require name_check to ease forward/backward compatibility
    # for tools
    if self.op.no_install and self.op.start:
      self.LogInfo("No-installation mode selected, disabling startup")
      self.op.start = False
    # validate/normalize the instance name
    self.op.instance_name = \
      netutils.Hostname.GetNormalizedName(self.op.instance_name)

    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("Cannot do IP address check without a name"
                                 " check", errors.ECODE_INVAL)

    # add NIC for instance communication
    if self.op.instance_communication:
      nic_name = _ComputeInstanceCommunicationNIC(self.op.instance_name)

      self.op.nics.append({constants.INIC_NAME: nic_name,
                           constants.INIC_MAC: constants.VALUE_GENERATE,
                           constants.INIC_IP: constants.NIC_IP_POOL,
                           constants.INIC_NETWORK:
                             self.cfg.GetInstanceCommunicationNetwork()})

    # check nics' parameter names
    for nic in self.op.nics:
      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
    # check that NIC's parameters names are unique and valid
    utils.ValidateDeviceNames("NIC", self.op.nics)

    self._CheckVLANArguments()

    self._CheckDiskArguments()
    assert self.op.disk_template is not None

    # instance name verification
    if self.op.name_check:
      self.hostname = _CheckHostnameSane(self, self.op.instance_name)
      self.op.instance_name = self.hostname.name
      # used in CheckPrereq for ip ping check
      self.check_ip = self.hostname.ip
    else:
      self.check_ip = None

    # file storage checks
    if (self.op.file_driver and
        not self.op.file_driver in constants.FILE_DRIVER):
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                 self.op.file_driver, errors.ECODE_INVAL)

    # set default file_driver if unset and required
    if (not self.op.file_driver and
        self.op.disk_template in constants.DTS_FILEBASED):
      self.op.file_driver = constants.FD_LOOP

    ### Node/iallocator related checks
    CheckIAllocatorOrNode(self, "iallocator", "pnode")

    if self.op.pnode is not None:
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        if self.op.snode is None:
          raise errors.OpPrereqError("The networked disk templates need"
                                     " a mirror node", errors.ECODE_INVAL)
      elif self.op.snode:
        self.LogWarning("Secondary node will be ignored on non-mirrored disk"
                        " template")
        self.op.snode = None

    _CheckOpportunisticLocking(self.op)

    if self.op.mode == constants.INSTANCE_IMPORT:
      # On import force_variant must be True, because if we forced it at
      # initial install, our only chance when importing it back is that it
      # works again!
      self.op.force_variant = True

      if self.op.no_install:
        self.LogInfo("No-installation mode has no effect during import")

      if objects.GetOSImage(self.op.osparams):
        self.LogInfo("OS image has no effect during import")
    elif self.op.mode == constants.INSTANCE_CREATE:
      os_image = CheckOSImage(self.op)

      if self.op.os_type is None and os_image is None:
        raise errors.OpPrereqError("No guest OS or OS image specified",
                                   errors.ECODE_INVAL)

      if self.op.os_type is not None \
            and self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
        raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
                                   " installation" % self.op.os_type,
                                   errors.ECODE_STATE)
    elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
      if objects.GetOSImage(self.op.osparams):
        self.LogInfo("OS image has no effect during import")

      self._cds = GetClusterDomainSecret()

      # Check handshake to ensure both clusters have the same domain secret
      src_handshake = self.op.source_handshake
      if not src_handshake:
        raise errors.OpPrereqError("Missing source handshake",
                                   errors.ECODE_INVAL)

      errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
                                                           src_handshake)
      if errmsg:
        raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
                                   errors.ECODE_INVAL)

      # Load and check source CA
      self.source_x509_ca_pem = self.op.source_x509_ca
      if not self.source_x509_ca_pem:
        raise errors.OpPrereqError("Missing source X509 CA",
                                   errors.ECODE_INVAL)

      try:
        (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
                                                    self._cds)
      except OpenSSL.crypto.Error, err:
        raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
                                   (err, ), errors.ECODE_INVAL)

      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
      if errcode is not None:
        raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
                                   errors.ECODE_INVAL)

      self.source_x509_ca = cert

      src_instance_name = self.op.source_instance_name
      if not src_instance_name:
        raise errors.OpPrereqError("Missing source instance name",
                                   errors.ECODE_INVAL)

      self.source_instance_name = \
        netutils.GetHostname(name=src_instance_name).name

    else:
      raise errors.OpPrereqError("Invalid instance creation mode %r" %
                                 self.op.mode, errors.ECODE_INVAL)

  def ExpandNames(self):
    """ExpandNames for CreateInstance.

    Figure out the right locks for instance creation.

    """
    self.needed_locks = {}

    # this is just a preventive check, but someone might still add this
    # instance in the meantime, and creation will fail at lock-add time
    if self.op.instance_name in\
      [inst.name for inst in self.cfg.GetAllInstancesInfo().values()]:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 self.op.instance_name, errors.ECODE_EXISTS)

    self.add_locks[locking.LEVEL_INSTANCE] = self.op.instance_name

    if self.op.iallocator:
      # TODO: Find a solution to not lock all nodes in the cluster, e.g. by
      # specifying a group on instance creation and then selecting nodes from
      # that group
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
      self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET

      if self.op.opportunistic_locking:
        self.opportunistic_locks[locking.LEVEL_NODE] = True
    else:
      (self.op.pnode_uuid, self.op.pnode) = \
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
      nodelist = [self.op.pnode_uuid]
      if self.op.snode is not None:
        (self.op.snode_uuid, self.op.snode) = \
          ExpandNodeUuidAndName(self.cfg, self.op.snode_uuid, self.op.snode)
        nodelist.append(self.op.snode_uuid)
      self.needed_locks[locking.LEVEL_NODE] = nodelist

    # in case of import lock the source node too
    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = self.op.src_node
      src_path = self.op.src_path

      if src_path is None:
        self.op.src_path = src_path = self.op.instance_name

      if src_node is None:
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
        self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
        self.op.src_node = None
        if os.path.isabs(src_path):
          raise errors.OpPrereqError("Importing an instance from a path"
                                     " requires a source node option",
                                     errors.ECODE_INVAL)
      else:
        (self.op.src_node_uuid, self.op.src_node) = (_, src_node) = \
          ExpandNodeUuidAndName(self.cfg, self.op.src_node_uuid, src_node)
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
          self.needed_locks[locking.LEVEL_NODE].append(self.op.src_node_uuid)
        if not os.path.isabs(src_path):
          self.op.src_path = \
            utils.PathJoin(pathutils.EXPORT_DIR, src_path)

    self.needed_locks[locking.LEVEL_NODE_RES] = \
      CopyLockList(self.needed_locks[locking.LEVEL_NODE])

    # Optimistically acquire shared group locks (we're reading the
    # configuration).  We can't just call GetInstanceNodeGroups, because the
    # instance doesn't exist yet. Therefore we lock all node groups of all
    # nodes we have.
    if self.needed_locks[locking.LEVEL_NODE] == locking.ALL_SET:
      # In the case we lock all nodes for opportunistic allocation, we have no
      # choice than to lock all groups, because they're allocated before nodes.
      # This is sad, but true. At least we release all those we don't need in
      # CheckPrereq later.
      self.needed_locks[locking.LEVEL_NODEGROUP] = locking.ALL_SET
    else:
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
        list(self.cfg.GetNodeGroupsFromNodes(
          self.needed_locks[locking.LEVEL_NODE]))
    self.share_locks[locking.LEVEL_NODEGROUP] = 1

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE_RES and \
      self.opportunistic_locks[locking.LEVEL_NODE]:
      # Even when using opportunistic locking, we require the same set of
      # NODE_RES locks as we got NODE locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        self.owned_locks(locking.LEVEL_NODE)

  def _RunAllocator(self):
    """Run the allocator based on input opcode.

    """
    if self.op.opportunistic_locking:
      # Only consider nodes for which a lock is held
      node_name_whitelist = self.cfg.GetNodeNames(
        self.owned_locks(locking.LEVEL_NODE))
    else:
      node_name_whitelist = None

    req = _CreateInstanceAllocRequest(self.op, self.disks,
                                      self.nics, self.be_full,
                                      node_name_whitelist)
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      # When opportunistic locks are used only a temporary failure is generated
      if self.op.opportunistic_locking:
        ecode = errors.ECODE_TEMP_NORES
      else:
        ecode = errors.ECODE_NORES

      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" %
                                 (self.op.iallocator, ial.info),
                                 ecode)

    (self.op.pnode_uuid, self.op.pnode) = \
      ExpandNodeUuidAndName(self.cfg, None, ial.result[0])
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                 self.op.instance_name, self.op.iallocator,
                 utils.CommaJoin(ial.result))

    assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator"

    if req.RequiredNodes() == 2:
      (self.op.snode_uuid, self.op.snode) = \
        ExpandNodeUuidAndName(self.cfg, None, ial.result[1])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "ADD_MODE": self.op.mode,
      }
    if self.op.mode == constants.INSTANCE_IMPORT:
      env["SRC_NODE"] = self.op.src_node
      env["SRC_PATH"] = self.op.src_path
      env["SRC_IMAGES"] = self.src_images

    env.update(BuildInstanceHookEnv(
      name=self.op.instance_name,
      primary_node_name=self.op.pnode,
      secondary_node_names=self.cfg.GetNodeNames(self.secondaries),
      status=self.op.start,
      os_type=self.op.os_type,
      minmem=self.be_full[constants.BE_MINMEM],
      maxmem=self.be_full[constants.BE_MAXMEM],
      vcpus=self.be_full[constants.BE_VCPUS],
      nics=NICListToTuple(self, self.nics),
      disk_template=self.op.disk_template,
      disks=[(d[constants.IDISK_NAME], d.get("uuid", ""),
              d[constants.IDISK_SIZE], d[constants.IDISK_MODE])
             for d in self.disks],
      bep=self.be_full,
      hvp=self.hv_full,
      hypervisor_name=self.op.hypervisor,
      tags=self.op.tags,
      ))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode(), self.op.pnode_uuid] + self.secondaries
    return nl, nl

  def _ReadExportInfo(self):
    """Reads the export information from disk.

    It will override the opcode source node and path with the actual
    information, if these two were not specified before.

    @return: the export information

    """
    assert self.op.mode == constants.INSTANCE_IMPORT

    if self.op.src_node_uuid is None:
      locked_nodes = self.owned_locks(locking.LEVEL_NODE)
      exp_list = self.rpc.call_export_list(locked_nodes)
      found = False
      for node_uuid in exp_list:
        if exp_list[node_uuid].fail_msg:
          continue
        if self.op.src_path in exp_list[node_uuid].payload:
          found = True
          self.op.src_node = self.cfg.GetNodeInfo(node_uuid).name
          self.op.src_node_uuid = node_uuid
          self.op.src_path = utils.PathJoin(pathutils.EXPORT_DIR,
                                            self.op.src_path)
          break
      if not found:
        raise errors.OpPrereqError("No export found for relative path %s" %
                                   self.op.src_path, errors.ECODE_INVAL)

    CheckNodeOnline(self, self.op.src_node_uuid)
    result = self.rpc.call_export_info(self.op.src_node_uuid, self.op.src_path)
    result.Raise("No export or invalid export found in dir %s" %
                 self.op.src_path)

    export_info = objects.SerializableConfigParser.Loads(str(result.payload))
    if not export_info.has_section(constants.INISECT_EXP):
      raise errors.ProgrammerError("Corrupted export config",
                                   errors.ECODE_ENVIRON)

    ei_version = export_info.get(constants.INISECT_EXP, "version")
    if int(ei_version) != constants.EXPORT_VERSION:
      raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                 (ei_version, constants.EXPORT_VERSION),
                                 errors.ECODE_ENVIRON)
    return export_info

  def _ReadExportParams(self, einfo):
    """Use export parameters as defaults.

    In case the opcode doesn't specify (as in override) some instance
    parameters, then try to use them from the export information, if
    that declares them.

    """
    self.op.os_type = einfo.get(constants.INISECT_EXP, "os")

    if not self.op.disks:
      disks = []
      # TODO: import the disk iv_name too
      for idx in range(constants.MAX_DISKS):
        if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
          disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
          disk_name = einfo.get(constants.INISECT_INS, "disk%d_name" % idx)
          disk = {
            constants.IDISK_SIZE: disk_sz,
            constants.IDISK_NAME: disk_name
            }
          disks.append(disk)
      self.op.disks = disks
      if not disks and self.op.disk_template != constants.DT_DISKLESS:
        raise errors.OpPrereqError("No disk info specified and the export"
                                   " is missing the disk information",
                                   errors.ECODE_INVAL)

    if not self.op.nics:
      nics = []
      for idx in range(constants.MAX_NICS):
        if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
          ndict = {}
          for name in [constants.INIC_IP,
                       constants.INIC_MAC, constants.INIC_NAME]:
            nic_param_name = "nic%d_%s" % (idx, name)
            if einfo.has_option(constants.INISECT_INS, nic_param_name):
              v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
              ndict[name] = v
          network = einfo.get(constants.INISECT_INS,
                              "nic%d_%s" % (idx, constants.INIC_NETWORK))
          # in case network is given link and mode are inherited
          # from nodegroup's netparams and thus should not be passed here
          if network:
            ndict[constants.INIC_NETWORK] = network
          else:
            for name in list(constants.NICS_PARAMETERS):
              v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
              ndict[name] = v
          nics.append(ndict)
        else:
          break
      self.op.nics = nics

    if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
      self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()

    if (self.op.hypervisor is None and
        einfo.has_option(constants.INISECT_INS, "hypervisor")):
      self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")

    if einfo.has_section(constants.INISECT_HYP):
      # use the export parameters but do not override the ones
      # specified by the user
      for name, value in einfo.items(constants.INISECT_HYP):
        if name not in self.op.hvparams:
          self.op.hvparams[name] = value

    if einfo.has_section(constants.INISECT_BEP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_BEP):
        if name not in self.op.beparams:
          self.op.beparams[name] = value
        # Compatibility for the old "memory" be param
        if name == constants.BE_MEMORY:
          if constants.BE_MAXMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MAXMEM] = value
          if constants.BE_MINMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MINMEM] = value
    else:
      # try to read the parameters old style, from the main section
      for name in constants.BES_PARAMETERS:
        if (name not in self.op.beparams and
            einfo.has_option(constants.INISECT_INS, name)):
          self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)

    if einfo.has_section(constants.INISECT_OSP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_OSP):
        if name not in self.op.osparams:
          self.op.osparams[name] = value

    if einfo.has_section(constants.INISECT_OSP_PRIVATE):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_OSP_PRIVATE):
        if name not in self.op.osparams_private:
          self.op.osparams_private[name] = serializer.Private(value, descr=name)

  def _RevertToDefaults(self, cluster):
    """Revert the instance parameters to the default values.

    """
    # hvparams
    hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
    for name in self.op.hvparams.keys():
      if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
        del self.op.hvparams[name]
    # beparams
    be_defs = cluster.SimpleFillBE({})
    for name in self.op.beparams.keys():
      if name in be_defs and be_defs[name] == self.op.beparams[name]:
        del self.op.beparams[name]
    # nic params
    nic_defs = cluster.SimpleFillNIC({})
    for nic in self.op.nics:
      for name in constants.NICS_PARAMETERS:
        if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
          del nic[name]
    # osparams
    os_defs = cluster.SimpleFillOS(self.op.os_type, {})
    for name in self.op.osparams.keys():
      if name in os_defs and os_defs[name] == self.op.osparams[name]:
        del self.op.osparams[name]

    os_defs_ = cluster.SimpleFillOS(self.op.os_type, {},
                                    os_params_private={})
    for name in self.op.osparams_private.keys():
      if name in os_defs_ and os_defs_[name] == self.op.osparams_private[name]:
        del self.op.osparams_private[name]

  def _CalculateFileStorageDir(self):
    """Calculate final instance file storage dir.

    """
    # file storage dir calculation/check
    self.instance_file_storage_dir = None
    if self.op.disk_template in constants.DTS_FILEBASED:
      # build the full file storage dir path
      joinargs = []

      cfg_storage = None
      if self.op.disk_template == constants.DT_FILE:
        cfg_storage = self.cfg.GetFileStorageDir()
      elif self.op.disk_template == constants.DT_SHARED_FILE:
        cfg_storage = self.cfg.GetSharedFileStorageDir()
      elif self.op.disk_template == constants.DT_GLUSTER:
        cfg_storage = self.cfg.GetGlusterStorageDir()

      if not cfg_storage:
        raise errors.OpPrereqError(
          "Cluster file storage dir for {tpl} storage type not defined".format(
            tpl=repr(self.op.disk_template)
          ),
          errors.ECODE_STATE
      )

      joinargs.append(cfg_storage)

      if self.op.file_storage_dir is not None:
        joinargs.append(self.op.file_storage_dir)

      if self.op.disk_template != constants.DT_GLUSTER:
        joinargs.append(self.op.instance_name)

      if len(joinargs) > 1:
        # pylint: disable=W0142
        self.instance_file_storage_dir = utils.PathJoin(*joinargs)
      else:
        self.instance_file_storage_dir = joinargs[0]
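
  # Illustrative note (added; not in the original source): for file-based disk
  # templates the result is the cluster storage directory joined with the
  # optional op.file_storage_dir and, except for Gluster, the instance name,
  # e.g. <cluster file storage dir>/<file_storage_dir>/<instance name>.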

  def CheckPrereq(self): # pylint: disable=R0914
    """Check prerequisites.

    """
    # Check that the optimistically acquired groups are correct wrt the
    # acquired nodes
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
    cur_groups = list(self.cfg.GetNodeGroupsFromNodes(owned_nodes))
    if not owned_groups.issuperset(cur_groups):
      raise errors.OpPrereqError("New instance %s's node groups changed since"
                                 " locks were acquired, current groups are"
                                 " '%s', owning groups '%s'; retry the"
                                 " operation" %
                                 (self.op.instance_name,
                                  utils.CommaJoin(cur_groups),
                                  utils.CommaJoin(owned_groups)),
                                 errors.ECODE_STATE)

    self._CalculateFileStorageDir()

    if self.op.mode == constants.INSTANCE_IMPORT:
      export_info = self._ReadExportInfo()
      self._ReadExportParams(export_info)
      self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
    else:
      self._old_instance_name = None

    if (not self.cfg.GetVGName() and
        self.op.disk_template not in constants.DTS_NOT_LVM):
      raise errors.OpPrereqError("Cluster does not support lvm-based"
                                 " instances", errors.ECODE_STATE)

    if (self.op.hypervisor is None or
        self.op.hypervisor == constants.VALUE_AUTO):
      self.op.hypervisor = self.cfg.GetHypervisorType()

    cluster = self.cfg.GetClusterInfo()
    enabled_hvs = cluster.enabled_hypervisors
    if self.op.hypervisor not in enabled_hvs:
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
                                 " cluster (%s)" %
                                 (self.op.hypervisor, ",".join(enabled_hvs)),
                                 errors.ECODE_STATE)

    # Check tag validity
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)

    # check hypervisor parameter syntax (locally)
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
    filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
                                      self.op.hvparams)
    hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
    hv_type.CheckParameterSyntax(filled_hvp)
    self.hv_full = filled_hvp
    # check that we don't specify global parameters on an instance
    CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
                         "instance", "cluster")

    # fill and remember the beparams dict
    self.be_full = _ComputeFullBeParams(self.op, cluster)

    # build os parameters
    if self.op.osparams_private is None:
      self.op.osparams_private = serializer.PrivateDict()
    if self.op.osparams_secret is None:
      self.op.osparams_secret = serializer.PrivateDict()

    self.os_full = cluster.SimpleFillOS(
      self.op.os_type,
      self.op.osparams,
      os_params_private=self.op.osparams_private,
      os_params_secret=self.op.osparams_secret
    )

    # now that hvp/bep are in final format, let's reset to defaults,
    # if told to do so
    if self.op.identify_defaults:
      self._RevertToDefaults(cluster)

    # NIC buildup
    self.nics = _ComputeNics(self.op, cluster, self.check_ip, self.cfg,
                             self.proc.GetECId())

    # disk checks/pre-build
    default_vg = self.cfg.GetVGName()
    self.disks = ComputeDisks(self.op, default_vg)

    if self.op.mode == constants.INSTANCE_IMPORT:
      disk_images = []
      for idx in range(len(self.disks)):
        option = "disk%d_dump" % idx
        if export_info.has_option(constants.INISECT_INS, option):
          # FIXME: are the old os-es, disk sizes, etc. useful?
          export_name = export_info.get(constants.INISECT_INS, option)
          image = utils.PathJoin(self.op.src_path, export_name)
          disk_images.append(image)
        else:
          disk_images.append(False)

      self.src_images = disk_images

      if self.op.instance_name == self._old_instance_name:
        for idx, nic in enumerate(self.nics):
          if nic.mac == constants.VALUE_AUTO:
            nic_mac_ini = "nic%d_mac" % idx
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)

    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT

    # ip ping checks (we use the same ip that was resolved in ExpandNames)
    if self.op.ip_check:
      if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (self.check_ip, self.op.instance_name),
                                   errors.ECODE_NOTUNIQUE)

    #### mac address generation
    # By generating here the mac address both the allocator and the hooks get
    # the real final mac address rather than the 'auto' or 'generate' value.
    # There is a race condition between the generation and the instance object
    # creation, which means that we know the mac is valid now, but we're not
    # sure it will be when we actually add the instance. If things go bad
    # adding the instance will abort because of a duplicate mac, and the
    # creation job will fail.
    for nic in self.nics:
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId())

    #### allocator run

    if self.op.iallocator is not None:
      self._RunAllocator()

    # Release all unneeded node locks
    keep_locks = filter(None, [self.op.pnode_uuid, self.op.snode_uuid,
                               self.op.src_node_uuid])
    ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)
    # Release all unneeded group locks
    ReleaseLocks(self, locking.LEVEL_NODEGROUP,
                 keep=self.cfg.GetNodeGroupsFromNodes(keep_locks))

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES)), \
      "Node locks differ from node resource locks"

    #### node related checks

    # check primary node
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode_uuid)
    assert self.pnode is not None, \
      "Cannot retrieve locked node %s" % self.op.pnode_uuid
    if pnode.offline:
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if pnode.drained:
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if not pnode.vm_capable:
      raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
                                 " '%s'" % pnode.name, errors.ECODE_STATE)

1114
    self.secondaries = []
1115

    
1116
    # Fill in any IPs from IP pools. This must happen here, because we need to
1117
    # know the nic's primary node, as specified by the iallocator
1118
    for idx, nic in enumerate(self.nics):
1119
      net_uuid = nic.network
1120
      if net_uuid is not None:
1121
        nobj = self.cfg.GetNetwork(net_uuid)
1122
        netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.uuid)
1123
        if netparams is None:
1124
          raise errors.OpPrereqError("No netparams found for network"
1125
                                     " %s. Probably not connected to"
1126
                                     " node's %s nodegroup" %
1127
                                     (nobj.name, self.pnode.name),
1128
                                     errors.ECODE_INVAL)
1129
        self.LogInfo("NIC/%d inherits netparams %s" %
1130
                     (idx, netparams.values()))
1131
        nic.nicparams = dict(netparams)
1132
        if nic.ip is not None:
1133
          if nic.ip.lower() == constants.NIC_IP_POOL:
1134
            try:
1135
              nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId())
1136
            except errors.ReservationError:
1137
              raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
1138
                                         " from the address pool" % idx,
1139
                                         errors.ECODE_STATE)
1140
            self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name)
1141
          else:
1142
            try:
1143
              self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId(),
1144
                                 check=self.op.conflicts_check)
1145
            except errors.ReservationError:
1146
              raise errors.OpPrereqError("IP address %s already in use"
1147
                                         " or does not belong to network %s" %
1148
                                         (nic.ip, nobj.name),
1149
                                         errors.ECODE_NOTUNIQUE)
1150

    
1151
      # net is None, ip None or given
1152
      elif self.op.conflicts_check:
1153
        _CheckForConflictingIp(self, nic.ip, self.pnode.uuid)
1154

    
1155
    # mirror node verification
1156
    if self.op.disk_template in constants.DTS_INT_MIRROR:
1157
      if self.op.snode_uuid == pnode.uuid:
1158
        raise errors.OpPrereqError("The secondary node cannot be the"
1159
                                   " primary node", errors.ECODE_INVAL)
1160
      CheckNodeOnline(self, self.op.snode_uuid)
1161
      CheckNodeNotDrained(self, self.op.snode_uuid)
1162
      CheckNodeVmCapable(self, self.op.snode_uuid)
1163
      self.secondaries.append(self.op.snode_uuid)
1164

    
1165
      snode = self.cfg.GetNodeInfo(self.op.snode_uuid)
1166
      if pnode.group != snode.group:
1167
        self.LogWarning("The primary and secondary nodes are in two"
1168
                        " different node groups; the disk parameters"
1169
                        " from the first disk's node group will be"
1170
                        " used")
1171

    
1172
    nodes = [pnode]
1173
    if self.op.disk_template in constants.DTS_INT_MIRROR:
1174
      nodes.append(snode)
1175
    has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
1176
    excl_stor = compat.any(map(has_es, nodes))
1177
    if excl_stor and not self.op.disk_template in constants.DTS_EXCL_STORAGE:
1178
      raise errors.OpPrereqError("Disk template %s not supported with"
1179
                                 " exclusive storage" % self.op.disk_template,
1180
                                 errors.ECODE_STATE)
1181
    for disk in self.disks:
1182
      CheckSpindlesExclusiveStorage(disk, excl_stor, True)

    node_uuids = [pnode.uuid] + self.secondaries

    if not self.adopt_disks:
      if self.op.disk_template == constants.DT_RBD:
        # _CheckRADOSFreeSpace() is just a placeholder.
        # Any function that checks prerequisites can be placed here.
        # Check if there is enough space on the RADOS cluster.
        CheckRADOSFreeSpace()
      elif self.op.disk_template == constants.DT_EXT:
        # FIXME: Function that checks prereqs if needed
        pass
      elif self.op.disk_template in constants.DTS_LVM:
        # Check lv size requirements, if not adopting
        req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
        CheckNodesFreeDiskPerVG(self, node_uuids, req_sizes)
      else:
        # FIXME: add checks for other, non-adopting, non-lvm disk templates
        pass

    elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
      all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
                                disk[constants.IDISK_ADOPT])
                     for disk in self.disks])
      if len(all_lvs) != len(self.disks):
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
                                   errors.ECODE_INVAL)
      for lv_name in all_lvs:
        try:
          # FIXME: lv_name here is "vg/lv" need to ensure that other calls
          # to ReserveLV uses the same syntax
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("LV named %s used by another instance" %
                                     lv_name, errors.ECODE_NOTUNIQUE)

      vg_names = self.rpc.call_vg_list([pnode.uuid])[pnode.uuid]
      vg_names.Raise("Cannot get VG information from node %s" % pnode.name)

      node_lvs = self.rpc.call_lv_list([pnode.uuid],
                                       vg_names.payload.keys())[pnode.uuid]
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
      node_lvs = node_lvs.payload

      delta = all_lvs.difference(node_lvs.keys())
      if delta:
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
      if online_lvs:
        raise errors.OpPrereqError("Online logical volumes found, cannot"
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
                                   errors.ECODE_STATE)
      # update the size of disk based on what is found
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
                                        dsk[constants.IDISK_ADOPT])][0]))

    elif self.op.disk_template == constants.DT_BLOCK:
      # Normalize and de-duplicate device paths
      all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
                       for disk in self.disks])
      if len(all_disks) != len(self.disks):
        raise errors.OpPrereqError("Duplicate disk names given for adoption",
                                   errors.ECODE_INVAL)
      baddisks = [d for d in all_disks
                  if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
      if baddisks:
        raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
                                   " cannot be adopted" %
                                   (utils.CommaJoin(baddisks),
                                    constants.ADOPTABLE_BLOCKDEV_ROOT),
                                   errors.ECODE_INVAL)

      node_disks = self.rpc.call_bdev_sizes([pnode.uuid],
                                            list(all_disks))[pnode.uuid]
      node_disks.Raise("Cannot get block device information from node %s" %
                       pnode.name)
      node_disks = node_disks.payload
      delta = all_disks.difference(node_disks.keys())
      if delta:
        raise errors.OpPrereqError("Missing block device(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_disks[dsk[constants.IDISK_ADOPT]]))

    # Check disk access param to be compatible with specified hypervisor
    node_info = self.cfg.GetNodeInfo(self.op.pnode_uuid)
    node_group = self.cfg.GetNodeGroup(node_info.group)
    disk_params = self.cfg.GetGroupDiskParams(node_group)
    access_type = disk_params[self.op.disk_template].get(
      constants.RBD_ACCESS, constants.DISK_KERNELSPACE
    )

    if not IsValidDiskAccessModeCombination(self.op.hypervisor,
                                            self.op.disk_template,
                                            access_type):
      raise errors.OpPrereqError("Selected hypervisor (%s) cannot be"
                                 " used with %s disk access param" %
                                 (self.op.hypervisor, access_type),
                                 errors.ECODE_STATE)

    # Verify instance specs
    spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
    ispec = {
      constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
      constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
      constants.ISPEC_DISK_COUNT: len(self.disks),
      constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE]
                                  for disk in self.disks],
      constants.ISPEC_NIC_COUNT: len(self.nics),
      constants.ISPEC_SPINDLE_USE: spindle_use,
      }

    group_info = self.cfg.GetNodeGroup(pnode.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
    res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
                                               self.op.disk_template)
    if not self.op.ignore_ipolicy and res:
      msg = ("Instance allocation to group %s (%s) violates policy: %s" %
             (pnode.group, group_info.name, utils.CommaJoin(res)))
      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

    CheckHVParams(self, node_uuids, self.op.hypervisor, self.op.hvparams)

    if self.op.os_type is not None:
      CheckNodeHasOS(self, pnode.uuid, self.op.os_type, self.op.force_variant)

    # check OS parameters (remotely)
    CheckOSParams(self, True, node_uuids, self.op.os_type, self.os_full)

    CheckNicsBridgesExist(self, self.nics, self.pnode.uuid)

    #TODO: _CheckExtParams (remotely)
    # Check parameters for extstorage

    # memory check on primary node
    #TODO(dynmem): use MINMEM for checking
    if self.op.start:
      hvfull = objects.FillDict(cluster.hvparams.get(self.op.hypervisor, {}),
                                self.op.hvparams)
      CheckNodeFreeMemory(self, self.pnode.uuid,
                          "creating instance %s" % self.op.instance_name,
                          self.be_full[constants.BE_MAXMEM],
                          self.op.hypervisor, hvfull)

    self.dry_run_result = list(node_uuids)

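  # Illustrative note, not used by the code: for the adoption paths checked
  # above, each entry of self.disks is a dict built from the opcode.  A
  # hypothetical LV-adoption entry would look roughly like
  #   {constants.IDISK_VG: "xenvg", constants.IDISK_ADOPT: "existing-lv"}
  # and a block-device adoption entry like
  #   {constants.IDISK_ADOPT: "/dev/disk/by-id/some-device"}
  # (names and paths above are made up); IDISK_SIZE is then overwritten with
  # the size reported by the node.
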
  def _RemoveDegradedDisks(self, feedback_fn, disk_abort, instance):
    """Removes degraded disks and instance.

    It optionally checks whether disks are degraded.  If the disks are
    degraded, they are removed and the instance is also removed from
    the configuration.

    If L{disk_abort} is True, then the disks are considered degraded
    and removed, and the instance is removed from the configuration.

    If L{disk_abort} is False, then it first checks whether disks are
    degraded and, if so, it removes the disks and the instance is
    removed from the configuration.

    @type feedback_fn: callable
    @param feedback_fn: function used to send feedback back to the caller

    @type disk_abort: boolean
    @param disk_abort:
      True if disks are degraded, False to first check if disks are
      degraded

    @type instance: L{objects.Instance}
    @param instance: instance containing the disks to check

    @rtype: NoneType
    @return: None
    @raise errors.OpExecError: if disks are degraded

    """
    if disk_abort:
      pass
    elif self.op.wait_for_sync:
      disk_abort = not WaitForSync(self, instance)
    elif instance.disk_template in constants.DTS_INT_MIRROR:
      # make sure the disks are not degraded (still sync-ing is ok)
      feedback_fn("* checking mirrors status")
      disk_abort = not WaitForSync(self, instance, oneshot=True)
    else:
      disk_abort = False

    if disk_abort:
      RemoveDisks(self, instance)
      self.cfg.RemoveInstance(instance.uuid)
      # Make sure the instance lock gets removed
      self.remove_locks[locking.LEVEL_INSTANCE] = instance.name
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance")

  def Exec(self, feedback_fn):
    """Create and add the instance to the cluster.

    """
    assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
                self.owned_locks(locking.LEVEL_NODE)), \
      "Node locks differ from node resource locks"

    ht_kind = self.op.hypervisor
    if ht_kind in constants.HTS_REQ_PORT:
      network_port = self.cfg.AllocatePort()
    else:
      network_port = None

    instance_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())

    # This is ugly but we got a chicken-egg problem here
    # We can only take the group disk parameters, as the instance
    # has no disks yet (we are generating them right here).
    nodegroup = self.cfg.GetNodeGroup(self.pnode.group)
    disks = GenerateDiskTemplate(self,
                                 self.op.disk_template,
                                 instance_uuid, self.pnode.uuid,
                                 self.secondaries,
                                 self.disks,
                                 self.instance_file_storage_dir,
                                 self.op.file_driver,
                                 0,
                                 feedback_fn,
                                 self.cfg.GetGroupDiskParams(nodegroup))

    if self.op.os_type is None:
      os_type = ""
    else:
      os_type = self.op.os_type

    iobj = objects.Instance(name=self.op.instance_name,
                            uuid=instance_uuid,
                            os=os_type,
                            primary_node=self.pnode.uuid,
                            nics=self.nics, disks=disks,
                            disk_template=self.op.disk_template,
                            disks_active=False,
                            admin_state=constants.ADMINST_DOWN,
                            network_port=network_port,
                            beparams=self.op.beparams,
                            hvparams=self.op.hvparams,
                            hypervisor=self.op.hypervisor,
                            osparams=self.op.osparams,
                            osparams_private=self.op.osparams_private,
                            )

    if self.op.tags:
      for tag in self.op.tags:
        iobj.AddTag(tag)

    if self.adopt_disks:
      if self.op.disk_template == constants.DT_PLAIN:
        # rename LVs to the newly-generated names; we need to construct
        # 'fake' LV disks with the old data, plus the new unique_id
        tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
        rename_to = []
        for t_dsk, a_dsk in zip(tmp_disks, self.disks):
          rename_to.append(t_dsk.logical_id)
          t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
        result = self.rpc.call_blockdev_rename(self.pnode.uuid,
                                               zip(tmp_disks, rename_to))
        result.Raise("Failed to rename adopted LVs")
    else:
      feedback_fn("* creating instance disks...")
      try:
        CreateDisks(self, iobj)
      except errors.OpExecError:
        self.LogWarning("Device creation failed")
        self.cfg.ReleaseDRBDMinors(self.op.instance_name)
        raise

    feedback_fn("adding instance %s to cluster config" % self.op.instance_name)

    self.cfg.AddInstance(iobj, self.proc.GetECId())

    # Declare that we don't want to remove the instance lock anymore, as we've
    # added the instance to the config
    del self.remove_locks[locking.LEVEL_INSTANCE]

    if self.op.mode == constants.INSTANCE_IMPORT:
      # Release unused nodes
      ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node_uuid])
    else:
      # Release all nodes
      ReleaseLocks(self, locking.LEVEL_NODE)

    # Wipe disks
    disk_abort = False
    if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
      feedback_fn("* wiping instance disks...")
      try:
        WipeDisks(self, iobj)
      except errors.OpExecError, err:
        logging.exception("Wiping disks failed")
        self.LogWarning("Wiping instance disks failed (%s)", err)
        disk_abort = True

    self._RemoveDegradedDisks(feedback_fn, disk_abort, iobj)

    # Image disks
    os_image = objects.GetOSImage(iobj.osparams)
    disk_abort = False

    if not self.adopt_disks and os_image is not None:
      feedback_fn("* imaging instance disks...")
      try:
        ImageDisks(self, iobj, os_image)
      except errors.OpExecError, err:
        logging.exception("Imaging disks failed")
        self.LogWarning("Imaging instance disks failed (%s)", err)
        disk_abort = True

    self._RemoveDegradedDisks(feedback_fn, disk_abort, iobj)

    # instance disks are now active
    iobj.disks_active = True

    # Release all node resource locks
    ReleaseLocks(self, locking.LEVEL_NODE_RES)

    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
      if self.op.mode == constants.INSTANCE_CREATE:
        os_image = objects.GetOSImage(self.op.osparams)

        if os_image is None and not self.op.no_install:
          pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
                        not self.op.wait_for_sync)
          if pause_sync:
            feedback_fn("* pausing disk sync to install instance OS")
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
                                                              (iobj.disks,
                                                               iobj), True)
            for idx, success in enumerate(result.payload):
              if not success:
                logging.warn("pause-sync of instance %s for disk %d failed",
                             self.op.instance_name, idx)

          feedback_fn("* running the instance OS create scripts...")
          # FIXME: pass debug option from opcode to backend
          os_add_result = \
            self.rpc.call_instance_os_add(self.pnode.uuid,
                                          (iobj, self.op.osparams_secret),
                                          False,
                                          self.op.debug_level)
          if pause_sync:
            feedback_fn("* resuming disk sync")
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
                                                              (iobj.disks,
                                                               iobj), False)
            for idx, success in enumerate(result.payload):
              if not success:
                logging.warn("resume-sync of instance %s for disk %d failed",
                             self.op.instance_name, idx)

          os_add_result.Raise("Could not add os for instance %s"
                              " on node %s" % (self.op.instance_name,
                                               self.pnode.name))

      else:
        if self.op.mode == constants.INSTANCE_IMPORT:
          feedback_fn("* running the instance OS import scripts...")

          transfers = []

          for idx, image in enumerate(self.src_images):
            if not image:
              continue

            # FIXME: pass debug option from opcode to backend
            dt = masterd.instance.DiskTransfer("disk/%s" % idx,
                                               constants.IEIO_FILE, (image, ),
                                               constants.IEIO_SCRIPT,
                                               ((iobj.disks[idx], iobj), idx),
                                               None)
            transfers.append(dt)

          import_result = \
            masterd.instance.TransferInstanceData(self, feedback_fn,
                                                  self.op.src_node_uuid,
                                                  self.pnode.uuid,
                                                  self.pnode.secondary_ip,
                                                  self.op.compress,
                                                  iobj, transfers)
          if not compat.all(import_result):
            self.LogWarning("Some disks for instance %s on node %s were not"
                            " imported successfully" % (self.op.instance_name,
                                                        self.pnode.name))

          rename_from = self._old_instance_name

        elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
          feedback_fn("* preparing remote import...")
          # The source cluster will stop the instance before attempting to make
          # a connection. In some cases stopping an instance can take a long
          # time, hence the shutdown timeout is added to the connection
          # timeout.
          connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
                             self.op.source_shutdown_timeout)
          timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)

          assert iobj.primary_node == self.pnode.uuid
          disk_results = \
            masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
                                          self.source_x509_ca,
                                          self._cds, self.op.compress, timeouts)
          if not compat.all(disk_results):
            # TODO: Should the instance still be started, even if some disks
            # failed to import (valid for local imports, too)?
            self.LogWarning("Some disks for instance %s on node %s were not"
                            " imported successfully" % (self.op.instance_name,
                                                        self.pnode.name))

          rename_from = self.source_instance_name

        else:
          # also checked in the prereq part
          raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
                                       % self.op.mode)

        # Run rename script on newly imported instance
        assert iobj.name == self.op.instance_name
        feedback_fn("Running rename script for %s" % self.op.instance_name)
        result = self.rpc.call_instance_run_rename(self.pnode.uuid, iobj,
                                                   rename_from,
                                                   self.op.debug_level)
        result.Warn("Failed to run rename script for %s on node %s" %
                    (self.op.instance_name, self.pnode.name), self.LogWarning)

    assert not self.owned_locks(locking.LEVEL_NODE_RES)

    if self.op.start:
      iobj.admin_state = constants.ADMINST_UP
      self.cfg.Update(iobj, feedback_fn)
      logging.info("Starting instance %s on node %s", self.op.instance_name,
                   self.pnode.name)
      feedback_fn("* starting instance...")
      result = self.rpc.call_instance_start(self.pnode.uuid, (iobj, None, None),
                                            False, self.op.reason)
      result.Raise("Could not start instance")

    return self.cfg.GetNodeNames(list(iobj.all_nodes))


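# Illustrative example (command line flags shown from memory, values made up):
# a request that ends up in LUInstanceCreate.Exec above could be submitted as
#   gnt-instance add -t drbd --disk 0:size=10G -o debian-image \
#     -n node1.example.com:node2.example.com inst1.example.com
# which allocates a network port if the hypervisor needs one, generates the
# disks, adds the instance to the configuration, optionally wipes/images the
# disks, runs the OS create or import scripts and finally starts the instance.
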
class LUInstanceRename(LogicalUnit):
1634
  """Rename an instance.
1635

1636
  """
1637
  HPATH = "instance-rename"
1638
  HTYPE = constants.HTYPE_INSTANCE
1639

    
1640
  def CheckArguments(self):
1641
    """Check arguments.
1642

1643
    """
1644
    if self.op.ip_check and not self.op.name_check:
1645
      # TODO: make the ip check more flexible and not depend on the name check
1646
      raise errors.OpPrereqError("IP address check requires a name check",
1647
                                 errors.ECODE_INVAL)
1648

    
1649
  def BuildHooksEnv(self):
1650
    """Build hooks env.
1651

1652
    This runs on master, primary and secondary nodes of the instance.
1653

1654
    """
1655
    env = BuildInstanceHookEnvByObject(self, self.instance)
1656
    env["INSTANCE_NEW_NAME"] = self.op.new_name
1657
    return env
1658

    
1659
  def BuildHooksNodes(self):
1660
    """Build hooks nodes.
1661

1662
    """
1663
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
1664
    return (nl, nl)
1665

    
1666
  def CheckPrereq(self):
1667
    """Check prerequisites.
1668

1669
    This checks that the instance is in the cluster and is not running.
1670

1671
    """
1672
    (self.op.instance_uuid, self.op.instance_name) = \
1673
      ExpandInstanceUuidAndName(self.cfg, self.op.instance_uuid,
1674
                                self.op.instance_name)
1675
    instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1676
    assert instance is not None
1677

    
1678
    # It should actually not happen that an instance is running with a disabled
1679
    # disk template, but in case it does, the renaming of file-based instances
1680
    # will fail horribly. Thus, we test it before.
1681
    if (instance.disk_template in constants.DTS_FILEBASED and
1682
        self.op.new_name != instance.name):
1683
      CheckDiskTemplateEnabled(self.cfg.GetClusterInfo(),
1684
                               instance.disk_template)
1685

    
1686
    CheckNodeOnline(self, instance.primary_node)
1687
    CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
1688
                       msg="cannot rename")
1689
    self.instance = instance
1690

    
1691
    new_name = self.op.new_name
1692
    if self.op.name_check:
1693
      hostname = _CheckHostnameSane(self, new_name)
1694
      new_name = self.op.new_name = hostname.name
1695
      if (self.op.ip_check and
1696
          netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
1697
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
1698
                                   (hostname.ip, new_name),
1699
                                   errors.ECODE_NOTUNIQUE)
1700

    
1701
    instance_names = [inst.name for
1702
                      inst in self.cfg.GetAllInstancesInfo().values()]
1703
    if new_name in instance_names and new_name != instance.name:
1704
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
1705
                                 new_name, errors.ECODE_EXISTS)
1706

    
1707
  def Exec(self, feedback_fn):
1708
    """Rename the instance.
1709

1710
    """
1711
    old_name = self.instance.name
1712

    
1713
    rename_file_storage = False
1714
    if (self.instance.disk_template in (constants.DT_FILE,
1715
                                        constants.DT_SHARED_FILE) and
1716
        self.op.new_name != self.instance.name):
1717
      old_file_storage_dir = os.path.dirname(
1718
                               self.instance.disks[0].logical_id[1])
1719
      rename_file_storage = True
1720

    
1721
    self.cfg.RenameInstance(self.instance.uuid, self.op.new_name)
1722
    # Change the instance lock. This is definitely safe while we hold the BGL.
1723
    # Otherwise the new lock would have to be added in acquired mode.
1724
    assert self.REQ_BGL
1725
    assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER)
1726

    
1727
    # re-read the instance from the configuration after rename
1728
    renamed_inst = self.cfg.GetInstanceInfo(self.instance.uuid)
1729

    
1730
    if rename_file_storage:
1731
      new_file_storage_dir = os.path.dirname(
1732
                               renamed_inst.disks[0].logical_id[1])
1733
      result = self.rpc.call_file_storage_dir_rename(renamed_inst.primary_node,
1734
                                                     old_file_storage_dir,
1735
                                                     new_file_storage_dir)
1736
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
1737
                   " (but the instance has been renamed in Ganeti)" %
1738
                   (self.cfg.GetNodeName(renamed_inst.primary_node),
1739
                    old_file_storage_dir, new_file_storage_dir))
1740

    
1741
    StartInstanceDisks(self, renamed_inst, None)
1742
    # update info on disks
1743
    info = GetInstanceInfoText(renamed_inst)
1744
    for (idx, disk) in enumerate(renamed_inst.disks):
1745
      for node_uuid in renamed_inst.all_nodes:
1746
        result = self.rpc.call_blockdev_setinfo(node_uuid,
1747
                                                (disk, renamed_inst), info)
1748
        result.Warn("Error setting info on node %s for disk %s" %
1749
                    (self.cfg.GetNodeName(node_uuid), idx), self.LogWarning)
1750
    try:
1751
      result = self.rpc.call_instance_run_rename(renamed_inst.primary_node,
1752
                                                 renamed_inst, old_name,
1753
                                                 self.op.debug_level)
1754
      result.Warn("Could not run OS rename script for instance %s on node %s"
1755
                  " (but the instance has been renamed in Ganeti)" %
1756
                  (renamed_inst.name,
1757
                   self.cfg.GetNodeName(renamed_inst.primary_node)),
1758
                  self.LogWarning)
1759
    finally:
1760
      ShutdownInstanceDisks(self, renamed_inst)
1761

    
1762
    return renamed_inst.name
1763

    
1764

    
1765
class LUInstanceRemove(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()
    elif level == locking.LEVEL_NODE_RES:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = BuildInstanceHookEnvByObject(self, self.instance)
    env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()]
    nl_post = list(self.instance.all_nodes) + nl
    return (nl, nl_post)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Remove the instance.

    """
    logging.info("Shutting down instance %s on node %s", self.instance.name,
                 self.cfg.GetNodeName(self.instance.primary_node))

    result = self.rpc.call_instance_shutdown(self.instance.primary_node,
                                             self.instance,
                                             self.op.shutdown_timeout,
                                             self.op.reason)
    if self.op.ignore_failures:
      result.Warn("Warning: can't shutdown instance", feedback_fn)
    else:
      result.Raise("Could not shutdown instance %s on node %s" %
                   (self.instance.name,
                    self.cfg.GetNodeName(self.instance.primary_node)))

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES))
    assert not (set(self.instance.all_nodes) -
                self.owned_locks(locking.LEVEL_NODE)), \
      "Not owning correct locks"

    RemoveInstance(self, feedback_fn, self.instance, self.op.ignore_failures)


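# Illustrative example (flags shown from memory): the LUInstanceRemove flow
# above backs a command such as
#   gnt-instance remove --shutdown-timeout=60 inst1.example.com
# i.e. shut the instance down on its primary node (optionally ignoring
# failures) and then remove its disks and configuration entry.
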
class LUInstanceMove(LogicalUnit):
1843
  """Move an instance by data-copying.
1844

1845
  """
1846
  HPATH = "instance-move"
1847
  HTYPE = constants.HTYPE_INSTANCE
1848
  REQ_BGL = False
1849

    
1850
  def ExpandNames(self):
1851
    self._ExpandAndLockInstance()
1852
    (self.op.target_node_uuid, self.op.target_node) = \
1853
      ExpandNodeUuidAndName(self.cfg, self.op.target_node_uuid,
1854
                            self.op.target_node)
1855
    self.needed_locks[locking.LEVEL_NODE] = [self.op.target_node_uuid]
1856
    self.needed_locks[locking.LEVEL_NODE_RES] = []
1857
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
1858

    
1859
  def DeclareLocks(self, level):
1860
    if level == locking.LEVEL_NODE:
1861
      self._LockInstancesNodes(primary_only=True)
1862
    elif level == locking.LEVEL_NODE_RES:
1863
      # Copy node locks
1864
      self.needed_locks[locking.LEVEL_NODE_RES] = \
1865
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1866

    
1867
  def BuildHooksEnv(self):
1868
    """Build hooks env.
1869

1870
    This runs on master, primary and target nodes of the instance.
1871

1872
    """
1873
    env = {
1874
      "TARGET_NODE": self.op.target_node,
1875
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
1876
      }
1877
    env.update(BuildInstanceHookEnvByObject(self, self.instance))
1878
    return env
1879

    
1880
  def BuildHooksNodes(self):
1881
    """Build hooks nodes.
1882

1883
    """
1884
    nl = [
1885
      self.cfg.GetMasterNode(),
1886
      self.instance.primary_node,
1887
      self.op.target_node_uuid,
1888
      ]
1889
    return (nl, nl)
1890

    
1891
  def CheckPrereq(self):
1892
    """Check prerequisites.
1893

1894
    This checks that the instance is in the cluster.
1895

1896
    """
1897
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1898
    assert self.instance is not None, \
1899
      "Cannot retrieve locked instance %s" % self.op.instance_name
1900

    
1901
    if self.instance.disk_template not in constants.DTS_COPYABLE:
1902
      raise errors.OpPrereqError("Disk template %s not suitable for copying" %
1903
                                 self.instance.disk_template,
1904
                                 errors.ECODE_STATE)
1905

    
1906
    target_node = self.cfg.GetNodeInfo(self.op.target_node_uuid)
1907
    assert target_node is not None, \
1908
      "Cannot retrieve locked node %s" % self.op.target_node
1909

    
1910
    self.target_node_uuid = target_node.uuid
1911
    if target_node.uuid == self.instance.primary_node:
1912
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
1913
                                 (self.instance.name, target_node.name),
1914
                                 errors.ECODE_STATE)
1915

    
1916
    cluster = self.cfg.GetClusterInfo()
1917
    bep = cluster.FillBE(self.instance)
1918

    
1919
    for idx, dsk in enumerate(self.instance.disks):
1920
      if dsk.dev_type not in (constants.DT_PLAIN, constants.DT_FILE,
1921
                              constants.DT_SHARED_FILE, constants.DT_GLUSTER):
1922
        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
1923
                                   " cannot copy" % idx, errors.ECODE_STATE)
1924

    
1925
    CheckNodeOnline(self, target_node.uuid)
1926
    CheckNodeNotDrained(self, target_node.uuid)
1927
    CheckNodeVmCapable(self, target_node.uuid)
1928
    group_info = self.cfg.GetNodeGroup(target_node.group)
1929
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
1930
    CheckTargetNodeIPolicy(self, ipolicy, self.instance, target_node, self.cfg,
1931
                           ignore=self.op.ignore_ipolicy)
1932

    
1933
    if self.instance.admin_state == constants.ADMINST_UP:
1934
      # check memory requirements on the target node
1935
      CheckNodeFreeMemory(
1936
          self, target_node.uuid, "failing over instance %s" %
1937
          self.instance.name, bep[constants.BE_MAXMEM],
1938
          self.instance.hypervisor,
1939
          cluster.hvparams[self.instance.hypervisor])
1940
    else:
1941
      self.LogInfo("Not checking memory on the secondary node as"
1942
                   " instance will not be started")
1943

    
1944
    # check bridge existence
1945
    CheckInstanceBridgesExist(self, self.instance, node_uuid=target_node.uuid)
1946

    
1947
  def Exec(self, feedback_fn):
1948
    """Move an instance.
1949

1950
    The move is done by shutting it down on its present node, copying
1951
    the data over (slow) and starting it on the new node.
1952

1953
    """
1954
    source_node = self.cfg.GetNodeInfo(self.instance.primary_node)
1955
    target_node = self.cfg.GetNodeInfo(self.target_node_uuid)
1956

    
1957
    self.LogInfo("Shutting down instance %s on source node %s",
1958
                 self.instance.name, source_node.name)
1959

    
1960
    assert (self.owned_locks(locking.LEVEL_NODE) ==
1961
            self.owned_locks(locking.LEVEL_NODE_RES))
1962

    
1963
    result = self.rpc.call_instance_shutdown(source_node.uuid, self.instance,
1964
                                             self.op.shutdown_timeout,
1965
                                             self.op.reason)
1966
    if self.op.ignore_consistency:
1967
      result.Warn("Could not shutdown instance %s on node %s. Proceeding"
1968
                  " anyway. Please make sure node %s is down. Error details" %
1969
                  (self.instance.name, source_node.name, source_node.name),
1970
                  self.LogWarning)
1971
    else:
1972
      result.Raise("Could not shutdown instance %s on node %s" %
1973
                   (self.instance.name, source_node.name))
1974

    
1975
    # create the target disks
1976
    try:
1977
      CreateDisks(self, self.instance, target_node_uuid=target_node.uuid)
1978
    except errors.OpExecError:
1979
      self.LogWarning("Device creation failed")
1980
      self.cfg.ReleaseDRBDMinors(self.instance.uuid)
1981
      raise
1982

    
1983
    errs = []
1984
    transfers = []
1985
    # activate, get path, create transfer jobs
1986
    for idx, disk in enumerate(self.instance.disks):
1987
      # FIXME: pass debug option from opcode to backend
1988
      dt = masterd.instance.DiskTransfer("disk/%s" % idx,
1989
                                         constants.IEIO_RAW_DISK,
1990
                                         (disk, self.instance),
1991
                                         constants.IEIO_RAW_DISK,
1992
                                         (disk, self.instance),
1993
                                         None)
1994
      transfers.append(dt)
1995

    
1996
    import_result = \
1997
      masterd.instance.TransferInstanceData(self, feedback_fn,
1998
                                            source_node.uuid,
1999
                                            target_node.uuid,
2000
                                            target_node.secondary_ip,
2001
                                            self.op.compress,
2002
                                            self.instance, transfers)
2003
    if not compat.all(import_result):
2004
      errs.append("Failed to transfer instance data")
2005

    
2006
    if errs:
2007
      self.LogWarning("Some disks failed to copy, aborting")
2008
      try:
2009
        RemoveDisks(self, self.instance, target_node_uuid=target_node.uuid)
2010
      finally:
2011
        self.cfg.ReleaseDRBDMinors(self.instance.uuid)
2012
        raise errors.OpExecError("Errors during disk copy: %s" %
2013
                                 (",".join(errs),))
2014

    
2015
    self.instance.primary_node = target_node.uuid
2016
    self.cfg.Update(self.instance, feedback_fn)
2017

    
2018
    self.LogInfo("Removing the disks on the original node")
2019
    RemoveDisks(self, self.instance, target_node_uuid=source_node.uuid)
2020

    
2021
    # Only start the instance if it's marked as up
2022
    if self.instance.admin_state == constants.ADMINST_UP:
2023
      self.LogInfo("Starting instance %s on node %s",
2024
                   self.instance.name, target_node.name)
2025

    
2026
      disks_ok, _ = AssembleInstanceDisks(self, self.instance,
2027
                                          ignore_secondaries=True)
2028
      if not disks_ok:
2029
        ShutdownInstanceDisks(self, self.instance)
2030
        raise errors.OpExecError("Can't activate the instance's disks")
2031

    
2032
      result = self.rpc.call_instance_start(target_node.uuid,
2033
                                            (self.instance, None, None), False,
2034
                                            self.op.reason)
2035
      msg = result.fail_msg
2036
      if msg:
2037
        ShutdownInstanceDisks(self, self.instance)
2038
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
2039
                                 (self.instance.name, target_node.name, msg))
2040

    
2041

    
2042
class LUInstanceMultiAlloc(NoHooksLU):
2043
  """Allocates multiple instances at the same time.
2044

2045
  """
2046
  REQ_BGL = False
2047

    
2048
  def CheckArguments(self):
2049
    """Check arguments.
2050

2051
    """
2052
    nodes = []
2053
    for inst in self.op.instances:
2054
      if inst.iallocator is not None:
2055
        raise errors.OpPrereqError("iallocator is not allowed to be set on"
                                   " instance objects", errors.ECODE_INVAL)
2057
      nodes.append(bool(inst.pnode))
2058
      if inst.disk_template in constants.DTS_INT_MIRROR:
2059
        nodes.append(bool(inst.snode))
2060

    
2061
    has_nodes = compat.any(nodes)
2062
    if compat.all(nodes) ^ has_nodes:
2063
      raise errors.OpPrereqError("There are instance objects providing"
2064
                                 " pnode/snode while others do not",
2065
                                 errors.ECODE_INVAL)
2066

    
2067
    if not has_nodes and self.op.iallocator is None:
2068
      default_iallocator = self.cfg.GetDefaultIAllocator()
2069
      if default_iallocator:
2070
        self.op.iallocator = default_iallocator
2071
      else:
2072
        raise errors.OpPrereqError("No iallocator or nodes on the instances"
2073
                                   " given and no cluster-wide default"
2074
                                   " iallocator found; please specify either"
2075
                                   " an iallocator or nodes on the instances"
2076
                                   " or set a cluster-wide default iallocator",
2077
                                   errors.ECODE_INVAL)
2078

    
2079
    _CheckOpportunisticLocking(self.op)
2080

    
2081
    dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
2082
    if dups:
2083
      raise errors.OpPrereqError("There are duplicate instance names: %s" %
2084
                                 utils.CommaJoin(dups), errors.ECODE_INVAL)
2085

    
2086
  def ExpandNames(self):
2087
    """Calculate the locks.
2088

2089
    """
2090
    self.share_locks = ShareAll()
2091
    self.needed_locks = {
2092
      # iallocator will select nodes and even if no iallocator is used,
2093
      # collisions with LUInstanceCreate should be avoided
2094
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
2095
      }
2096

    
2097
    if self.op.iallocator:
2098
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
2099
      self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET
2100

    
2101
      if self.op.opportunistic_locking:
2102
        self.opportunistic_locks[locking.LEVEL_NODE] = True
2103
    else:
2104
      nodeslist = []
2105
      for inst in self.op.instances:
2106
        (inst.pnode_uuid, inst.pnode) = \
2107
          ExpandNodeUuidAndName(self.cfg, inst.pnode_uuid, inst.pnode)
2108
        nodeslist.append(inst.pnode_uuid)
2109
        if inst.snode is not None:
2110
          (inst.snode_uuid, inst.snode) = \
2111
            ExpandNodeUuidAndName(self.cfg, inst.snode_uuid, inst.snode)
2112
          nodeslist.append(inst.snode_uuid)
2113

    
2114
      self.needed_locks[locking.LEVEL_NODE] = nodeslist
2115
      # Lock resources of instance's primary and secondary nodes (copy to
      # prevent accidental modification)
2117
      self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)
2118

    
2119
  def DeclareLocks(self, level):
2120
    if level == locking.LEVEL_NODE_RES and \
2121
      self.opportunistic_locks[locking.LEVEL_NODE]:
2122
      # Even when using opportunistic locking, we require the same set of
2123
      # NODE_RES locks as we got NODE locks
2124
      self.needed_locks[locking.LEVEL_NODE_RES] = \
2125
        self.owned_locks(locking.LEVEL_NODE)
2126

    
2127
  def CheckPrereq(self):
2128
    """Check prerequisite.
2129

2130
    """
2131
    if self.op.iallocator:
2132
      cluster = self.cfg.GetClusterInfo()
2133
      default_vg = self.cfg.GetVGName()
2134
      ec_id = self.proc.GetECId()
2135

    
2136
      if self.op.opportunistic_locking:
2137
        # Only consider nodes for which a lock is held
2138
        node_whitelist = self.cfg.GetNodeNames(
2139
                           list(self.owned_locks(locking.LEVEL_NODE)))
2140
      else:
2141
        node_whitelist = None
2142

    
2143
      insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
2144
                                           _ComputeNics(op, cluster, None,
2145
                                                        self.cfg, ec_id),
2146
                                           _ComputeFullBeParams(op, cluster),
2147
                                           node_whitelist)
2148
               for op in self.op.instances]
2149

    
2150
      req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
2151
      ial = iallocator.IAllocator(self.cfg, self.rpc, req)
2152

    
2153
      ial.Run(self.op.iallocator)
2154

    
2155
      if not ial.success:
2156
        raise errors.OpPrereqError("Can't compute nodes using"
2157
                                   " iallocator '%s': %s" %
2158
                                   (self.op.iallocator, ial.info),
2159
                                   errors.ECODE_NORES)
2160

    
2161
      self.ia_result = ial.result
2162

    
2163
    if self.op.dry_run:
2164
      self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
2165
        constants.JOB_IDS_KEY: [],
2166
        })
2167

    
2168
  def _ConstructPartialResult(self):
    """Constructs the partial result.
2170

2171
    """
2172
    if self.op.iallocator:
2173
      (allocatable, failed_insts) = self.ia_result
2174
      allocatable_insts = map(compat.fst, allocatable)
2175
    else:
2176
      allocatable_insts = [op.instance_name for op in self.op.instances]
2177
      failed_insts = []
2178

    
2179
    return {
2180
      constants.ALLOCATABLE_KEY: allocatable_insts,
2181
      constants.FAILED_KEY: failed_insts,
2182
      }
2183

    
2184
  def Exec(self, feedback_fn):
    """Executes the opcode.

    """
    jobs = []
    if self.op.iallocator:
      op2inst = dict((op.instance_name, op) for op in self.op.instances)
      (allocatable, failed) = self.ia_result

      for (name, node_names) in allocatable:
        op = op2inst.pop(name)

        (op.pnode_uuid, op.pnode) = \
          ExpandNodeUuidAndName(self.cfg, None, node_names[0])
        if len(node_names) > 1:
          (op.snode_uuid, op.snode) = \
            ExpandNodeUuidAndName(self.cfg, None, node_names[1])

        jobs.append([op])

      missing = set(op2inst.keys()) - set(failed)
      assert not missing, \
        "Iallocator did return incomplete result: %s" % \
        utils.CommaJoin(missing)
    else:
      jobs.extend([op] for op in self.op.instances)

    return ResultWithJobs(jobs, **self._ConstructPartialResult())


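# Illustrative sketch (made-up values): the iallocator result unpacked into
# (allocatable, failed) in LUInstanceMultiAlloc.Exec above looks roughly like
#   allocatable = [("inst1.example.com", ["node1.example.com",
#                                         "node2.example.com"]),
#                  ("inst2.example.com", ["node3.example.com"])]
#   failed = ["inst3.example.com"]
# so inst1 gets a primary and a secondary node, inst2 only a primary, and one
# creation job is submitted per allocatable instance.
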
class _InstNicModPrivate:
  """Data structure for network interface modifications.

  Used by L{LUInstanceSetParams}.

  """
  def __init__(self):
    self.params = None
    self.filled = None


def _PrepareContainerMods(mods, private_fn):
  """Prepares a list of container modifications by adding a private data field.

  @type mods: list of tuples; (operation, index, parameters)
  @param mods: List of modifications
  @type private_fn: callable or None
  @param private_fn: Callable for constructing a private data field for a
    modification
  @rtype: list

  """
  if private_fn is None:
    fn = lambda: None
  else:
    fn = private_fn

  return [(op, idx, params, fn()) for (op, idx, params) in mods]


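# Illustrative sketch (made-up values): given
#   mods = [(constants.DDM_ADD, -1, {"size": 1024}),
#           (constants.DDM_MODIFY, 0, {"mode": "ro"})]
# _PrepareContainerMods(mods, None) returns
#   [(constants.DDM_ADD, -1, {"size": 1024}, None),
#    (constants.DDM_MODIFY, 0, {"mode": "ro"}, None)]
# while passing private_fn=_InstNicModPrivate attaches a fresh private object
# to each modification instead of None.
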
def _CheckNodesPhysicalCPUs(lu, node_uuids, requested, hypervisor_specs):
  """Checks if nodes have enough physical CPUs.

  This function checks if all given nodes have the needed number of
  physical CPUs. In case any node has fewer CPUs or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type node_uuids: C{list}
  @param node_uuids: the list of node UUIDs to check
  @type requested: C{int}
  @param requested: the minimum acceptable number of physical CPUs
  @type hypervisor_specs: list of pairs (string, dict of strings)
  @param hypervisor_specs: list of hypervisor specifications in
      pairs (hypervisor_name, hvparams)
  @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
      or we cannot check the node

  """
  nodeinfo = lu.rpc.call_node_info(node_uuids, None, hypervisor_specs)
  for node_uuid in node_uuids:
    info = nodeinfo[node_uuid]
    node_name = lu.cfg.GetNodeName(node_uuid)
    info.Raise("Cannot get current information from node %s" % node_name,
               prereq=True, ecode=errors.ECODE_ENVIRON)
    (_, _, (hv_info, )) = info.payload
    num_cpus = hv_info.get("cpu_total", None)
    if not isinstance(num_cpus, int):
      raise errors.OpPrereqError("Can't compute the number of physical CPUs"
                                 " on node %s, result was '%s'" %
                                 (node_name, num_cpus), errors.ECODE_ENVIRON)
    if requested > num_cpus:
      raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
                                 "required" % (node_name, num_cpus, requested),
                                 errors.ECODE_NORES)


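# Illustrative call sketch (made-up requirement of 4 CPUs): a caller of
# _CheckNodesPhysicalCPUs above would do something like
#   hv_specs = [(constants.HT_XEN_PVM,
#                cluster.hvparams[constants.HT_XEN_PVM])]
#   _CheckNodesPhysicalCPUs(self, node_uuids, 4, hv_specs)
# where each pair is (hypervisor_name, hvparams) as described in the docstring
# above.
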
def GetItemFromContainer(identifier, kind, container):
  """Return the item referred to by the identifier.

  @type identifier: string
  @param identifier: Item index or name or UUID
  @type kind: string
  @param kind: One-word item description
  @type container: list
  @param container: Container to get the item from

  """
  # Index
  try:
    idx = int(identifier)
    if idx == -1:
      # Append
      absidx = len(container) - 1
    elif idx < 0:
      raise IndexError("Not accepting negative indices other than -1")
    elif idx > len(container):
      raise IndexError("Got %s index %s, but there are only %s" %
                       (kind, idx, len(container)))
    else:
      absidx = idx
    return (absidx, container[idx])
  except ValueError:
    pass

  for idx, item in enumerate(container):
    if item.uuid == identifier or item.name == identifier:
      return (idx, item)

  raise errors.OpPrereqError("Cannot find %s with identifier %s" %
                             (kind, identifier), errors.ECODE_NOENT)


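# Illustrative sketch (made-up names): with a list of objects.NIC items,
#   GetItemFromContainer("0", "nic", nics)        # by index
#   GetItemFromContainer("-1", "nic", nics)       # last item
#   GetItemFromContainer("nic-pub", "nic", nics)  # by name, if so named
#   GetItemFromContainer(some_uuid, "nic", nics)  # by UUID
# all return an (absolute_index, item) pair, or raise OpPrereqError if nothing
# matches.
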
def _ApplyContainerMods(kind, container, chgdesc, mods,
                        create_fn, modify_fn, remove_fn,
                        post_add_fn=None):
  """Applies descriptions in C{mods} to C{container}.

  @type kind: string
  @param kind: One-word item description
  @type container: list
  @param container: Container to modify
  @type chgdesc: None or list
  @param chgdesc: List of applied changes
  @type mods: list
  @param mods: Modifications as returned by L{_PrepareContainerMods}
  @type create_fn: callable
  @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
    receives absolute item index, parameters and private data object as added
    by L{_PrepareContainerMods}, returns tuple containing new item and changes
    as list
  @type modify_fn: callable
  @param modify_fn: Callback for modifying an existing item
    (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
    and private data object as added by L{_PrepareContainerMods}, returns
    changes as list
  @type remove_fn: callable
  @param remove_fn: Callback on removing item; receives absolute item index,
    item and private data object as added by L{_PrepareContainerMods}
  @type post_add_fn: callable
  @param post_add_fn: Callable for post-processing a newly created item after
    it has been put into the container. It receives the index of the new item
    and the new item as parameters.

  """
  for (op, identifier, params, private) in mods:
    changes = None

    if op == constants.DDM_ADD:
      # Calculate where item will be added
      # When adding an item, identifier can only be an index
      try:
        idx = int(identifier)
      except ValueError:
        raise errors.OpPrereqError("Only positive integer or -1 is accepted as"
                                   " identifier for %s" % constants.DDM_ADD,
                                   errors.ECODE_INVAL)
      if idx == -1:
        addidx = len(container)
      else:
        if idx < 0:
          raise IndexError("Not accepting negative indices other than -1")
        elif idx > len(container):
          raise IndexError("Got %s index %s, but there are only %s" %
                           (kind, idx, len(container)))
        addidx = idx

      if create_fn is None:
        item = params
      else:
        (item, changes) = create_fn(addidx, params, private)

      if idx == -1:
        container.append(item)
      else:
        assert idx >= 0
        assert idx <= len(container)
        # list.insert does so before the specified index
        container.insert(idx, item)

      if post_add_fn is not None:
        post_add_fn(addidx, item)

    else:
      # Retrieve existing item
      (absidx, item) = GetItemFromContainer(identifier, kind, container)

      if op == constants.DDM_REMOVE:
        assert not params

        changes = [("%s/%s" % (kind, absidx), "remove")]

        if remove_fn is not None:
          msg = remove_fn(absidx, item, private)
          if msg:
            changes.append(("%s/%s" % (kind, absidx), msg))

        assert container[absidx] == item
        del container[absidx]
      elif op == constants.DDM_MODIFY:
        if modify_fn is not None:
          changes = modify_fn(absidx, item, params, private)
      else:
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)

    assert _TApplyContModsCbChanges(changes)

    if not (chgdesc is None or changes is None):
      chgdesc.extend(changes)


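# Minimal usage sketch of _ApplyContainerMods (commented out, simplified
# stand-in values): with plain strings as items and no callbacks,
#   container = ["a", "b"]
#   chgdesc = []
#   mods = _PrepareContainerMods([(constants.DDM_ADD, -1, "c"),
#                                 (constants.DDM_REMOVE, 0, None)], None)
#   _ApplyContainerMods("item", container, chgdesc, mods, None, None, None)
# leaves container == ["b", "c"] and chgdesc == [("item/0", "remove")];
# LUInstanceSetParams passes real create/modify/remove callbacks for disks and
# NICs instead of None.
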
def _UpdateIvNames(base_index, disks):
  """Updates the C{iv_name} attribute of disks.

  @type disks: list of L{objects.Disk}

  """
  for (idx, disk) in enumerate(disks):
    disk.iv_name = "disk/%s" % (base_index + idx, )


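# Illustrative example (disk_a/disk_b are placeholder names):
#   _UpdateIvNames(2, [disk_a, disk_b])
# sets disk_a.iv_name to "disk/2" and disk_b.iv_name to "disk/3", keeping the
# visible disk names consecutive after container modifications.
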
class LUInstanceSetParams(LogicalUnit):
  """Modifies an instance's parameters.
2429

2430
  """
2431
  HPATH = "instance-modify"
2432
  HTYPE = constants.HTYPE_INSTANCE
2433
  REQ_BGL = False
2434

    
2435
  @staticmethod
2436
  def _UpgradeDiskNicMods(kind, mods, verify_fn):
2437
    assert ht.TList(mods)
2438
    assert not mods or len(mods[0]) in (2, 3)
2439

    
2440
    if mods and len(mods[0]) == 2:
2441
      result = []
2442

    
2443
      addremove = 0
2444
      for op, params in mods:
2445
        if op in (constants.DDM_ADD, constants.DDM_REMOVE):
2446
          result.append((op, -1, params))
2447
          addremove += 1
2448

    
2449
          if addremove > 1:
2450
            raise errors.OpPrereqError("Only one %s add or remove operation is"
2451
                                       " supported at a time" % kind,
2452
                                       errors.ECODE_INVAL)
2453
        else:
2454
          result.append((constants.DDM_MODIFY, op, params))
2455

    
2456
      assert verify_fn(result)
2457
    else:
2458
      result = mods
2459

    
2460
    return result
2461

    
2462
  @staticmethod
2463
  def _CheckMods(kind, mods, key_types, item_fn):
2464
    """Ensures requested disk/NIC modifications are valid.
2465

2466
    """
2467
    for (op, _, params) in mods:
2468
      assert ht.TDict(params)
2469

    
2470
      # If 'key_types' is an empty dict, we assume we have an
2471
      # 'ext' template and thus do not ForceDictType
2472
      if key_types:
2473
        utils.ForceDictType(params, key_types)
2474

    
2475
      if op == constants.DDM_REMOVE:
2476
        if params:
2477
          raise errors.OpPrereqError("No settings should be passed when"
2478
                                     " removing a %s" % kind,
2479
                                     errors.ECODE_INVAL)
2480
      elif op in (constants.DDM_ADD, constants.DDM_MODIFY):
2481
        item_fn(op, params)
2482
      else:
2483
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2484

    
2485
  def _VerifyDiskModification(self, op, params, excl_stor):
2486
    """Verifies a disk modification.
2487

2488
    """
2489
    if op == constants.DDM_ADD:
2490
      mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
2491
      if mode not in constants.DISK_ACCESS_SET:
2492
        raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
2493
                                   errors.ECODE_INVAL)
2494

    
2495
      size = params.get(constants.IDISK_SIZE, None)
2496
      if size is None:
2497
        raise errors.OpPrereqError("Required disk parameter '%s' missing" %
2498
                                   constants.IDISK_SIZE, errors.ECODE_INVAL)
2499
      size = int(size)
2500

    
2501
      params[constants.IDISK_SIZE] = size
2502
      name = params.get(constants.IDISK_NAME, None)
2503
      if name is not None and name.lower() == constants.VALUE_NONE:
2504
        params[constants.IDISK_NAME] = None
2505

    
2506
      CheckSpindlesExclusiveStorage(params, excl_stor, True)
2507

    
2508
    elif op == constants.DDM_MODIFY:
2509
      if constants.IDISK_SIZE in params:
2510
        raise errors.OpPrereqError("Disk size change not possible, use"
2511
                                   " grow-disk", errors.ECODE_INVAL)
2512

    
2513
      # Disk modification supports changing only the disk name and mode.
2514
      # Changing arbitrary parameters is allowed only for ext disk template
2515
      if self.instance.disk_template != constants.DT_EXT:
2516
        utils.ForceDictType(params, constants.MODIFIABLE_IDISK_PARAMS_TYPES)
2517

    
2518
      name = params.get(constants.IDISK_NAME, None)
2519
      if name is not None and name.lower() == constants.VALUE_NONE:
2520
        params[constants.IDISK_NAME] = None
2521

    
2522
  @staticmethod
  def _VerifyNicModification(op, params):
    """Verifies a network interface modification.

    """
    if op in (constants.DDM_ADD, constants.DDM_MODIFY):
      ip = params.get(constants.INIC_IP, None)
      name = params.get(constants.INIC_NAME, None)
      req_net = params.get(constants.INIC_NETWORK, None)
      link = params.get(constants.NIC_LINK, None)
      mode = params.get(constants.NIC_MODE, None)
      if name is not None and name.lower() == constants.VALUE_NONE:
        params[constants.INIC_NAME] = None
      if req_net is not None:
        if req_net.lower() == constants.VALUE_NONE:
          params[constants.INIC_NETWORK] = None
          req_net = None
        elif link is not None or mode is not None:
          raise errors.OpPrereqError("If network is given"
                                     " mode or link should not",
                                     errors.ECODE_INVAL)

      if op == constants.DDM_ADD:
        macaddr = params.get(constants.INIC_MAC, None)
        if macaddr is None:
          params[constants.INIC_MAC] = constants.VALUE_AUTO

      if ip is not None:
        if ip.lower() == constants.VALUE_NONE:
          params[constants.INIC_IP] = None
        else:
          if ip.lower() == constants.NIC_IP_POOL:
            if op == constants.DDM_ADD and req_net is None:
              raise errors.OpPrereqError("If ip=pool, parameter network"
                                         " cannot be none",
                                         errors.ECODE_INVAL)
          else:
            if not netutils.IPAddress.IsValid(ip):
              raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                         errors.ECODE_INVAL)

      if constants.INIC_MAC in params:
        macaddr = params[constants.INIC_MAC]
        if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
          macaddr = utils.NormalizeAndValidateMac(macaddr)

        if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
                                     " modifying an existing NIC",
                                     errors.ECODE_INVAL)

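  # Illustrative example (values assumed): a NIC addition that requests an
  # address from an IP pool must also name a network, e.g.
  #   {constants.INIC_IP: constants.NIC_IP_POOL,
  #    constants.INIC_NETWORK: "net1"}
  # The MAC defaults to constants.VALUE_AUTO when omitted on add, while
  # "auto" is rejected when modifying an existing NIC.
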
  def CheckArguments(self):
    if not (self.op.nics or self.op.disks or self.op.disk_template or
            self.op.hvparams or self.op.beparams or self.op.os_name or
            self.op.osparams or self.op.offline is not None or
            self.op.runtime_mem or self.op.pnode or self.op.osparams_private or
            self.op.instance_communication is not None):
      raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)

    if self.op.hvparams:
      CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
                           "hypervisor", "instance", "cluster")

    self.op.disks = self._UpgradeDiskNicMods(
      "disk", self.op.disks, ht.TSetParamsMods(ht.TIDiskParams))
    self.op.nics = self._UpgradeDiskNicMods(
      "NIC", self.op.nics, ht.TSetParamsMods(ht.TINicParams))

    if self.op.disks and self.op.disk_template is not None:
      raise errors.OpPrereqError("Disk template conversion and other disk"
                                 " changes not supported at the same time",
                                 errors.ECODE_INVAL)

    if (self.op.disk_template and
        self.op.disk_template in constants.DTS_INT_MIRROR and
        self.op.remote_node is None):
      raise errors.OpPrereqError("Changing the disk template to a mirrored"
                                 " one requires specifying a secondary node",
                                 errors.ECODE_INVAL)

    # Check NIC modifications
    self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
                    self._VerifyNicModification)

    if self.op.pnode:
      (self.op.pnode_uuid, self.op.pnode) = \
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)

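  # Usage sketch (hypothetical request): an opcode carrying both
  #   self.op.disk_template = constants.DT_DRBD8
  # and a non-empty self.op.disks list is rejected by the checks above;
  # mirrored templates such as DRBD additionally require self.op.remote_node
  # to name the future secondary node.
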
  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODEGROUP] = []
    # Can't even acquire node locks in shared mode as upcoming changes in
    # Ganeti 2.6 will start to modify the node object on disk conversion
    self.needed_locks[locking.LEVEL_NODE] = []
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
    # Lock the node group (shared) to be able to look up the ipolicy
    self.share_locks[locking.LEVEL_NODEGROUP] = 1

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
      # Acquire locks for the instance's nodegroups optimistically. Needs
      # to be verified in CheckPrereq
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
        self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
    elif level == locking.LEVEL_NODE:
      self._LockInstancesNodes()
      if self.op.disk_template and self.op.remote_node:
        (self.op.remote_node_uuid, self.op.remote_node) = \
          ExpandNodeUuidAndName(self.cfg, self.op.remote_node_uuid,
                                self.op.remote_node)
        self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node_uuid)
    elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, primary and secondaries.

    """
    args = {}
    if constants.BE_MINMEM in self.be_new:
      args["minmem"] = self.be_new[constants.BE_MINMEM]
    if constants.BE_MAXMEM in self.be_new:
      args["maxmem"] = self.be_new[constants.BE_MAXMEM]
    if constants.BE_VCPUS in self.be_new:
      args["vcpus"] = self.be_new[constants.BE_VCPUS]
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
    # information at all.

    if self._new_nics is not None:
      nics = []

      for nic in self._new_nics:
        n = copy.deepcopy(nic)
        nicparams = self.cluster.SimpleFillNIC(n.nicparams)
        n.nicparams = nicparams
        nics.append(NICToTuple(self, n))

      args["nics"] = nics

    env = BuildInstanceHookEnvByObject(self, self.instance, override=args)
    if self.op.disk_template:
      env["NEW_DISK_TEMPLATE"] = self.op.disk_template
    if self.op.runtime_mem:
      env["RUNTIME_MEMORY"] = self.op.runtime_mem

    return env

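  # Sketch of the extra hook data (values invented): new beparams that set
  # minmem/maxmem/vcpus override the corresponding entries of the standard
  # instance hook environment, a disk template change adds e.g.
  # NEW_DISK_TEMPLATE=drbd, and a runtime memory change adds RUNTIME_MEMORY.
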
  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)

  def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
2683
                              old_params, cluster, pnode_uuid):
2684

    
2685
    update_params_dict = dict([(key, params[key])
2686
                               for key in constants.NICS_PARAMETERS
2687
                               if key in params])
2688

    
2689
    req_link = update_params_dict.get(constants.NIC_LINK, None)
2690
    req_mode = update_params_dict.get(constants.NIC_MODE, None)
2691

    
2692
    new_net_uuid = None
2693
    new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid)
2694
    if new_net_uuid_or_name:
2695
      new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name)
2696
      new_net_obj = self.cfg.GetNetwork(new_net_uuid)
2697

    
2698
    if old_net_uuid:
2699
      old_net_obj = self.cfg.GetNetwork(old_net_uuid)
2700

    
2701
    if new_net_uuid:
2702
      netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode_uuid)
2703
      if not netparams:
2704
        raise errors.OpPrereqError("No netparams found for the network"
2705
                                   " %s, probably not connected" %
2706
                                   new_net_obj.name, errors.ECODE_INVAL)
2707
      new_params = dict(netparams)
2708
    else:
2709
      new_params = GetUpdatedParams(old_params, update_params_dict)
2710

    
2711
    utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)
2712

    
2713
    new_filled_params = cluster.SimpleFillNIC(new_params)
2714
    objects.NIC.CheckParameterSyntax(new_filled_params)
2715

    
2716
    new_mode = new_filled_params[constants.NIC_MODE]
2717
    if new_mode == constants.NIC_MODE_BRIDGED:
2718
      bridge = new_filled_params[constants.NIC_LINK]
2719
      msg = self.rpc.call_bridges_exist(pnode_uuid, [bridge]).fail_msg
2720
      if msg:
2721
        msg = "Error checking bridges on node '%s': %s" % \
2722
                (self.cfg.GetNodeName(pnode_uuid), msg)
2723
        if self.op.force:
2724
          self.warn.append(msg)
2725
        else:
2726
          raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
2727

    
2728
    elif new_mode == constants.NIC_MODE_ROUTED:
2729
      ip = params.get(constants.INIC_IP, old_ip)
2730
      if ip is None:
2731
        raise errors.OpPrereqError("Cannot set the NIC IP address to None"
2732
                                   " on a routed NIC", errors.ECODE_INVAL)
2733

    
2734
    elif new_mode == constants.NIC_MODE_OVS:
2735
      # TODO: check OVS link
2736
      self.LogInfo("OVS links are currently not checked for correctness")
2737

    
2738
    if constants.INIC_MAC in params:
2739
      mac = params[constants.INIC_MAC]
2740
      if mac is None:
2741
        raise errors.OpPrereqError("Cannot unset the NIC MAC address",
2742
                                   errors.ECODE_INVAL)
2743
      elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2744
        # otherwise generate the MAC address
2745
        params[constants.INIC_MAC] = \
2746
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
2747
      else:
2748
        # or validate/reserve the current one
2749
        try:
2750
          self.cfg.ReserveMAC(mac, self.proc.GetECId())
2751
        except errors.ReservationError:
2752
          raise errors.OpPrereqError("MAC address '%s' already in use"
2753
                                     " in cluster" % mac,
2754
                                     errors.ECODE_NOTUNIQUE)
2755
    elif new_net_uuid != old_net_uuid:
2756

    
2757
      def get_net_prefix(net_uuid):
2758
        mac_prefix = None
2759
        if net_uuid:
2760
          nobj = self.cfg.GetNetwork(net_uuid)
2761
          mac_prefix = nobj.mac_prefix
2762

    
2763
        return mac_prefix
2764

    
2765
      new_prefix = get_net_prefix(new_net_uuid)
2766
      old_prefix = get_net_prefix(old_net_uuid)
2767
      if old_prefix != new_prefix:
2768
        params[constants.INIC_MAC] = \
2769
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
2770

    
2771
    # if there is a change in (ip, network) tuple
2772
    new_ip = params.get(constants.INIC_IP, old_ip)
2773
    if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
2774
      if new_ip:
2775
        # if IP is pool then require a network and generate one IP
2776
        if new_ip.lower() == constants.NIC_IP_POOL:
2777
          if new_net_uuid:
2778
            try:
2779
              new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
2780
            except errors.ReservationError:
2781
              raise errors.OpPrereqError("Unable to get a free IP"
2782
                                         " from the address pool",
2783
                                         errors.ECODE_STATE)
2784
            self.LogInfo("Chose IP %s from network %s",
2785
                         new_ip,
2786
                         new_net_obj.name)
2787
            params[constants.INIC_IP] = new_ip
2788
          else:
2789
            raise errors.OpPrereqError("ip=pool, but no network found",
2790
                                       errors.ECODE_INVAL)
2791
        # Reserve the new IP in the new network, if any
        elif new_net_uuid:
2793
          try:
2794
            self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId(),
2795
                               check=self.op.conflicts_check)
2796
            self.LogInfo("Reserving IP %s in network %s",
2797
                         new_ip, new_net_obj.name)
2798
          except errors.ReservationError:
2799
            raise errors.OpPrereqError("IP %s not available in network %s" %
2800
                                       (new_ip, new_net_obj.name),
2801
                                       errors.ECODE_NOTUNIQUE)
2802
        # new network is None so check if new IP is a conflicting IP
2803
        elif self.op.conflicts_check:
2804
          _CheckForConflictingIp(self, new_ip, pnode_uuid)
2805

    
2806
      # release old IP if old network is not None
2807
      if old_ip and old_net_uuid:
2808
        try:
2809
          self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId())
2810
        except errors.AddressPoolError:
2811
          logging.warning("Release IP %s not contained in network %s",
2812
                          old_ip, old_net_obj.name)
2813

    
2814
    # there are no changes in (ip, network) tuple and old network is not None
2815
    elif (old_net_uuid is not None and
2816
          (req_link is not None or req_mode is not None)):
2817
      raise errors.OpPrereqError("Not allowed to change link or mode of"
2818
                                 " a NIC that is connected to a network",
2819
                                 errors.ECODE_INVAL)
2820

    
2821
    private.params = new_params
2822
    private.filled = new_filled_params
2823

    
2824
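  # Illustrative note (example values assumed): when a NIC is moved into a
  # network, its parameters come from the group's netparams rather than from
  # the request, e.g. a group connection providing
  #   {constants.NIC_MODE: constants.NIC_MODE_BRIDGED,
  #    constants.NIC_LINK: "br0"}
  # becomes, after SimpleFillNIC, the NIC's filled parameters; requests that
  # try to set link or mode together with a network are rejected earlier in
  # _VerifyNicModification.
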
  def _PreCheckDiskTemplate(self, pnode_info):
2825
    """CheckPrereq checks related to a new disk template."""
2826
    # Arguments are passed to avoid configuration lookups
2827
    pnode_uuid = self.instance.primary_node
2828
    if self.instance.disk_template == self.op.disk_template:
2829
      raise errors.OpPrereqError("Instance already has disk template %s" %
2830
                                 self.instance.disk_template,
2831
                                 errors.ECODE_INVAL)
2832

    
2833
    if not self.cluster.IsDiskTemplateEnabled(self.op.disk_template):
2834
      raise errors.OpPrereqError("Disk template '%s' is not enabled for this"
2835
                                 " cluster." % self.op.disk_template)
2836

    
2837
    if (self.instance.disk_template,
2838
        self.op.disk_template) not in self._DISK_CONVERSIONS:
2839
      raise errors.OpPrereqError("Unsupported disk template conversion from"
2840
                                 " %s to %s" % (self.instance.disk_template,
2841
                                                self.op.disk_template),
2842
                                 errors.ECODE_INVAL)
2843
    CheckInstanceState(self, self.instance, INSTANCE_DOWN,
2844
                       msg="cannot change disk template")
2845
    if self.op.disk_template in constants.DTS_INT_MIRROR:
2846
      if self.op.remote_node_uuid == pnode_uuid:
2847
        raise errors.OpPrereqError("Given new secondary node %s is the same"
2848
                                   " as the primary node of the instance" %
2849
                                   self.op.remote_node, errors.ECODE_STATE)
2850
      CheckNodeOnline(self, self.op.remote_node_uuid)
2851
      CheckNodeNotDrained(self, self.op.remote_node_uuid)
2852
      # FIXME: here we assume that the old instance type is DT_PLAIN
2853
      assert self.instance.disk_template == constants.DT_PLAIN
2854
      disks = [{constants.IDISK_SIZE: d.size,
2855
                constants.IDISK_VG: d.logical_id[0]}
2856
               for d in self.instance.disks]
2857
      required = ComputeDiskSizePerVG(self.op.disk_template, disks)
2858
      CheckNodesFreeDiskPerVG(self, [self.op.remote_node_uuid], required)
2859

    
2860
      snode_info = self.cfg.GetNodeInfo(self.op.remote_node_uuid)
2861
      snode_group = self.cfg.GetNodeGroup(snode_info.group)
2862
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
2863
                                                              snode_group)
2864
      CheckTargetNodeIPolicy(self, ipolicy, self.instance, snode_info, self.cfg,
2865
                             ignore=self.op.ignore_ipolicy)
2866
      if pnode_info.group != snode_info.group:
2867
        self.LogWarning("The primary and secondary nodes are in two"
2868
                        " different node groups; the disk parameters"
2869
                        " from the first disk's node group will be"
2870
                        " used")
2871

    
2872
    if not self.op.disk_template in constants.DTS_EXCL_STORAGE:
2873
      # Make sure none of the nodes require exclusive storage
2874
      nodes = [pnode_info]
2875
      if self.op.disk_template in constants.DTS_INT_MIRROR:
2876
        assert snode_info
2877
        nodes.append(snode_info)
2878
      has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
2879
      if compat.any(map(has_es, nodes)):
2880
        errmsg = ("Cannot convert disk template from %s to %s when exclusive"
2881
                  " storage is enabled" % (self.instance.disk_template,
2882
                                           self.op.disk_template))
2883
        raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)
2884

    
2885
  def _PreCheckDisks(self, ispec):
2886
    """CheckPrereq checks related to disk changes.
2887

2888
    @type ispec: dict
2889
    @param ispec: instance specs to be updated with the new disks
2890

2891
    """
2892
    self.diskparams = self.cfg.GetInstanceDiskParams(self.instance)
2893

    
2894
    excl_stor = compat.any(
2895
      rpc.GetExclusiveStorageForNodes(self.cfg,
2896
                                      self.instance.all_nodes).values()
2897
      )
2898

    
2899
    # Check disk modifications. This is done here and not in CheckArguments
2900
    # (as with NICs), because we need to know the instance's disk template
2901
    ver_fn = lambda op, par: self._VerifyDiskModification(op, par, excl_stor)
2902
    if self.instance.disk_template == constants.DT_EXT:
2903
      self._CheckMods("disk", self.op.disks, {}, ver_fn)
2904
    else:
2905
      self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
2906
                      ver_fn)
2907

    
2908
    self.diskmod = _PrepareContainerMods(self.op.disks, None)
2909

    
2910
    # Check the validity of the `provider' parameter
2911
    if self.instance.disk_template in constants.DT_EXT:
2912
      for mod in self.diskmod:
2913
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
2914
        if mod[0] == constants.DDM_ADD:
2915
          if ext_provider is None:
2916
            raise errors.OpPrereqError("Instance template is '%s' and parameter"
2917
                                       " '%s' missing, during disk add" %
2918
                                       (constants.DT_EXT,
2919
                                        constants.IDISK_PROVIDER),
2920
                                       errors.ECODE_NOENT)
2921
        elif mod[0] == constants.DDM_MODIFY:
2922
          if ext_provider:
2923
            raise errors.OpPrereqError("Parameter '%s' is invalid during disk"
2924
                                       " modification" %
2925
                                       constants.IDISK_PROVIDER,
2926
                                       errors.ECODE_INVAL)
2927
    else:
2928
      for mod in self.diskmod:
2929
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
2930
        if ext_provider is not None:
2931
          raise errors.OpPrereqError("Parameter '%s' is only valid for"
2932
                                     " instances of type '%s'" %
2933
                                     (constants.IDISK_PROVIDER,
2934
                                      constants.DT_EXT),
2935
                                     errors.ECODE_INVAL)
2936

    
2937
    if not self.op.wait_for_sync and self.instance.disks_active:
2938
      for mod in self.diskmod:
2939
        if mod[0] == constants.DDM_ADD:
2940
          raise errors.OpPrereqError("Can't add a disk to an instance with"
2941
                                     " activated disks and"
2942
                                     " --no-wait-for-sync given.",
2943
                                     errors.ECODE_INVAL)
2944

    
2945
    if self.op.disks and self.instance.disk_template == constants.DT_DISKLESS:
2946
      raise errors.OpPrereqError("Disk operations not supported for"
2947
                                 " diskless instances", errors.ECODE_INVAL)
2948

    
2949
    def _PrepareDiskMod(_, disk, params, __):
2950
      disk.name = params.get(constants.IDISK_NAME, None)
2951

    
2952
    # Verify disk changes (operating on a copy)
2953
    disks = copy.deepcopy(self.instance.disks)
2954
    _ApplyContainerMods("disk", disks, None, self.diskmod, None,
2955
                        _PrepareDiskMod, None)
2956
    utils.ValidateDeviceNames("disk", disks)
2957
    if len(disks) > constants.MAX_DISKS:
2958
      raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
2959
                                 " more" % constants.MAX_DISKS,
2960
                                 errors.ECODE_STATE)
2961
    disk_sizes = [disk.size for disk in self.instance.disks]
2962
    disk_sizes.extend(params["size"] for (op, idx, params, private) in
2963
                      self.diskmod if op == constants.DDM_ADD)
2964
    ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
2965
    ispec[constants.ISPEC_DISK_SIZE] = disk_sizes
2966

    
2967
    if self.op.offline is not None and self.op.offline:
2968
      CheckInstanceState(self, self.instance, CAN_CHANGE_INSTANCE_OFFLINE,
2969
                         msg="can't change to offline")
2970
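  # Illustrative result (values assumed): for an instance with one existing
  # 10240 MiB disk and a modification list adding a 2048 MiB disk, the code
  # above leaves
  #   ispec[constants.ISPEC_DISK_COUNT] == 2
  #   ispec[constants.ISPEC_DISK_SIZE] == [10240, 2048]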

    
2971
  @staticmethod
2972
  def _InstanceCommunicationDDM(cfg, instance_communication, instance):
2973
    """Create a NIC mod that adds or removes the instance
2974
    communication NIC to a running instance.
2975

2976
    The NICS are dynamically created using the Dynamic Device
2977
    Modification (DDM).  This function produces a NIC modification
2978
    (mod) that inserts an additional NIC meant for instance
2979
    communication in or removes an existing instance communication NIC
2980
    from a running instance, using DDM.
2981

2982
    @type cfg: L{config.ConfigWriter}
2983
    @param cfg: cluster configuration
2984

2985
    @type instance_communication: boolean
2986
    @param instance_communication: whether instance communication is
2987
                                   enabled or disabled
2988

2989
    @type instance: L{objects.Instance}
2990
    @param instance: instance to which the NIC mod will be applied to
2991

2992
    @rtype: (L{constants.DDM_ADD}, -1, parameters) or
2993
            (L{constants.DDM_REMOVE}, -1, parameters) or
2994
            L{None}
2995
    @return: DDM mod containing an action to add or remove the NIC, or
2996
             None if nothing needs to be done
2997

2998
    """
2999
    nic_name = _ComputeInstanceCommunicationNIC(instance.name)
3000

    
3001
    instance_communication_nic = None
3002

    
3003
    for nic in instance.nics:
3004
      if nic.name == nic_name:
3005
        instance_communication_nic = nic
3006
        break
3007

    
3008
    if instance_communication and not instance_communication_nic:
3009
      action = constants.DDM_ADD
3010
      params = {constants.INIC_NAME: nic_name,
3011
                constants.INIC_MAC: constants.VALUE_GENERATE,
3012
                constants.INIC_IP: constants.NIC_IP_POOL,
3013
                constants.INIC_NETWORK:
3014
                  cfg.GetInstanceCommunicationNetwork()}
3015
    elif not instance_communication and instance_communication_nic:
3016
      action = constants.DDM_REMOVE
3017
      params = None
3018
    else:
3019
      action = None
3020
      params = None
3021

    
3022
    if action is not None:
3023
      return (action, -1, params)
3024
    else:
3025
      return None
3026
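  # Example of the returned mod (sketch following the code above): enabling
  # instance communication for an instance that lacks the NIC yields
  #   (constants.DDM_ADD, -1,
  #    {constants.INIC_NAME: _ComputeInstanceCommunicationNIC(instance.name),
  #     constants.INIC_MAC: constants.VALUE_GENERATE,
  #     constants.INIC_IP: constants.NIC_IP_POOL,
  #     constants.INIC_NETWORK: cfg.GetInstanceCommunicationNetwork()})
  # while disabling it produces (constants.DDM_REMOVE, -1, None).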

    
3027
  def CheckPrereq(self):
3028
    """Check prerequisites.
3029

3030
    This only checks the instance list against the existing names.
3031

3032
    """
3033
    assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
3034
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
3035
    self.cluster = self.cfg.GetClusterInfo()
3036
    cluster_hvparams = self.cluster.hvparams[self.instance.hypervisor]
3037

    
3038
    assert self.instance is not None, \
3039
      "Cannot retrieve locked instance %s" % self.op.instance_name
3040

    
3041
    pnode_uuid = self.instance.primary_node
3042

    
3043
    self.warn = []
3044

    
3045
    if (self.op.pnode_uuid is not None and self.op.pnode_uuid != pnode_uuid and
3046
        not self.op.force):
3047
      # verify that the instance is not up
3048
      instance_info = self.rpc.call_instance_info(
3049
          pnode_uuid, self.instance.name, self.instance.hypervisor,
3050
          cluster_hvparams)
3051
      if instance_info.fail_msg:
3052
        self.warn.append("Can't get instance runtime information: %s" %
3053
                         instance_info.fail_msg)
3054
      elif instance_info.payload:
3055
        raise errors.OpPrereqError("Instance is still running on %s" %
3056
                                   self.cfg.GetNodeName(pnode_uuid),
3057
                                   errors.ECODE_STATE)
3058

    
3059
    assert pnode_uuid in self.owned_locks(locking.LEVEL_NODE)
3060
    node_uuids = list(self.instance.all_nodes)
3061
    pnode_info = self.cfg.GetNodeInfo(pnode_uuid)
3062

    
3063
    #_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
3064
    assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
3065
    group_info = self.cfg.GetNodeGroup(pnode_info.group)
3066

    
3067
    # dictionary with instance information after the modification
3068
    ispec = {}
3069

    
3070
    if self.op.hotplug or self.op.hotplug_if_possible:
3071
      result = self.rpc.call_hotplug_supported(self.instance.primary_node,
3072
                                               self.instance)
3073
      if result.fail_msg:
3074
        if self.op.hotplug:
3075
          result.Raise("Hotplug is not possible: %s" % result.fail_msg,
3076
                       prereq=True)
3077
        else:
3078
          self.LogWarning(result.fail_msg)
3079
          self.op.hotplug = False
3080
          self.LogInfo("Modification will take place without hotplugging.")
3081
      else:
3082
        self.op.hotplug = True
3083

    
3084
    # Prepare NIC modifications
3085
    # add or remove NIC for instance communication
3086
    if self.op.instance_communication is not None:
3087
      mod = self._InstanceCommunicationDDM(self.cfg,
3088
                                           self.op.instance_communication,
3089
                                           self.instance)
3090
      if mod is not None:
3091
        self.op.nics.append(mod)
3092

    
3093
    self.nicmod = _PrepareContainerMods(self.op.nics, _InstNicModPrivate)
3094

    
3095
    # OS change
3096
    if self.op.os_name and not self.op.force:
3097
      CheckNodeHasOS(self, self.instance.primary_node, self.op.os_name,
3098
                     self.op.force_variant)
3099
      instance_os = self.op.os_name
3100
    else:
3101
      instance_os = self.instance.os
3102

    
3103
    assert not (self.op.disk_template and self.op.disks), \
3104
      "Can't modify disk template and apply disk changes at the same time"
3105

    
3106
    if self.op.disk_template:
3107
      self._PreCheckDiskTemplate(pnode_info)
3108

    
3109
    self._PreCheckDisks(ispec)
3110

    
3111
    # hvparams processing
3112
    if self.op.hvparams:
3113
      hv_type = self.instance.hypervisor
3114
      i_hvdict = GetUpdatedParams(self.instance.hvparams, self.op.hvparams)
3115
      utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
3116
      hv_new = self.cluster.SimpleFillHV(hv_type, self.instance.os, i_hvdict)
3117

    
3118
      # local check
3119
      hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
3120
      CheckHVParams(self, node_uuids, self.instance.hypervisor, hv_new)
3121
      self.hv_proposed = self.hv_new = hv_new # the new actual values
3122
      self.hv_inst = i_hvdict # the new dict (without defaults)
3123
    else:
3124
      self.hv_proposed = self.cluster.SimpleFillHV(self.instance.hypervisor,
3125
                                                   self.instance.os,
3126
                                                   self.instance.hvparams)
3127
      self.hv_new = self.hv_inst = {}
3128

    
3129
    # beparams processing
3130
    if self.op.beparams:
3131
      i_bedict = GetUpdatedParams(self.instance.beparams, self.op.beparams,
3132
                                  use_none=True)
3133
      objects.UpgradeBeParams(i_bedict)
3134
      utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
3135
      be_new = self.cluster.SimpleFillBE(i_bedict)
3136
      self.be_proposed = self.be_new = be_new # the new actual values
3137
      self.be_inst = i_bedict # the new dict (without defaults)
3138
    else:
3139
      self.be_new = self.be_inst = {}
3140
      self.be_proposed = self.cluster.SimpleFillBE(self.instance.beparams)
3141
    be_old = self.cluster.FillBE(self.instance)
3142

    
3143
    # CPU param validation -- checking every time a parameter is
3144
    # changed to cover all cases where either CPU mask or vcpus have
3145
    # changed
3146
    if (constants.BE_VCPUS in self.be_proposed and
3147
        constants.HV_CPU_MASK in self.hv_proposed):
3148
      cpu_list = \
3149
        utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
3150
      # Verify mask is consistent with number of vCPUs. Can skip this
3151
      # test if only 1 entry in the CPU mask, which means same mask
3152
      # is applied to all vCPUs.
3153
      if (len(cpu_list) > 1 and
3154
          len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
3155
        raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
3156
                                   " CPU mask [%s]" %
3157
                                   (self.be_proposed[constants.BE_VCPUS],
3158
                                    self.hv_proposed[constants.HV_CPU_MASK]),
3159
                                   errors.ECODE_INVAL)
3160

    
3161
      # Only perform this test if a new CPU mask is given
3162
      if constants.HV_CPU_MASK in self.hv_new:
3163
        # Calculate the largest CPU number requested
3164
        max_requested_cpu = max(map(max, cpu_list))
3165
        # Check that all of the instance's nodes have enough physical CPUs to
3166
        # satisfy the requested CPU mask
3167
        hvspecs = [(self.instance.hypervisor,
3168
                    self.cfg.GetClusterInfo()
3169
                      .hvparams[self.instance.hypervisor])]
3170
        _CheckNodesPhysicalCPUs(self, self.instance.all_nodes,
3171
                                max_requested_cpu + 1,
3172
                                hvspecs)
3173
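        # Worked example (hypothetical mask syntax): an HV_CPU_MASK of
        # "0:1:2" parses into three per-vCPU entries, so BE_VCPUS must be 3;
        # the highest CPU referenced is 2, hence _CheckNodesPhysicalCPUs
        # above requires every node to expose at least
        # max_requested_cpu + 1 = 3 physical CPUs.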

    
3174
    # osparams processing
3175
    if self.op.osparams or self.op.osparams_private:
3176
      public_parms = self.op.osparams or {}
3177
      private_parms = self.op.osparams_private or {}
3178
      dupe_keys = utils.GetRepeatedKeys(public_parms, private_parms)
3179

    
3180
      if dupe_keys:
3181
        raise errors.OpPrereqError("OS parameters repeated multiple times: %s" %
3182
                                   utils.CommaJoin(dupe_keys))
3183

    
3184
      self.os_inst = GetUpdatedParams(self.instance.osparams,
3185
                                      public_parms)
3186
      self.os_inst_private = GetUpdatedParams(self.instance.osparams_private,
3187
                                              private_parms)
3188

    
3189
      CheckOSParams(self, True, node_uuids, instance_os,
3190
                    objects.FillDict(self.os_inst,
3191
                                     self.os_inst_private))
3192

    
3193
    else:
3194
      self.os_inst = {}
3195
      self.os_inst_private = {}
3196

    
3197
    #TODO(dynmem): do the appropriate check involving MINMEM
3198
    if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
3199
        be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
3200
      mem_check_list = [pnode_uuid]
3201
      if be_new[constants.BE_AUTO_BALANCE]:
3202
        # either we changed auto_balance to yes or it was from before
3203
        mem_check_list.extend(self.instance.secondary_nodes)
3204
      instance_info = self.rpc.call_instance_info(
3205
          pnode_uuid, self.instance.name, self.instance.hypervisor,
3206
          cluster_hvparams)
3207
      hvspecs = [(self.instance.hypervisor,
3208
                  cluster_hvparams)]
3209
      nodeinfo = self.rpc.call_node_info(mem_check_list, None,
3210
                                         hvspecs)
3211
      pninfo = nodeinfo[pnode_uuid]
3212
      msg = pninfo.fail_msg
3213
      if msg:
3214
        # Assume the primary node is unreachable and go ahead
3215
        self.warn.append("Can't get info from primary node %s: %s" %
3216
                         (self.cfg.GetNodeName(pnode_uuid), msg))
3217
      else:
3218
        (_, _, (pnhvinfo, )) = pninfo.payload
3219
        if not isinstance(pnhvinfo.get("memory_free", None), int):
3220
          self.warn.append("Node data from primary node %s doesn't contain"
3221
                           " free memory information" %
3222
                           self.cfg.GetNodeName(pnode_uuid))
3223
        elif instance_info.fail_msg:
3224
          self.warn.append("Can't get instance runtime information: %s" %
3225
                           instance_info.fail_msg)
3226
        else:
3227
          if instance_info.payload:
3228
            current_mem = int(instance_info.payload["memory"])
3229
          else:
3230
            # Assume instance not running
3231
            # (there is a slight race condition here, but it's not very
3232
            # probable, and we have no other way to check)
3233
            # TODO: Describe race condition
3234
            current_mem = 0
3235
          #TODO(dynmem): do the appropriate check involving MINMEM
3236
          miss_mem = (be_new[constants.BE_MAXMEM] - current_mem -
3237
                      pnhvinfo["memory_free"])
3238
          if miss_mem > 0:
3239
            raise errors.OpPrereqError("This change will prevent the instance"
3240
                                       " from starting, due to %d MB of memory"
3241
                                       " missing on its primary node" %
3242
                                       miss_mem, errors.ECODE_NORES)
3243
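          # Worked example for the check above (numbers invented): raising
          # BE_MAXMEM to 4096 MB while the instance currently uses 1024 MB
          # and the primary node reports 2048 MB free gives
          #   miss_mem = 4096 - 1024 - 2048 = 1024 > 0
          # and therefore aborts the change.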

    
3244
      if be_new[constants.BE_AUTO_BALANCE]:
3245
        for node_uuid, nres in nodeinfo.items():
3246
          if node_uuid not in self.instance.secondary_nodes:
3247
            continue
3248
          nres.Raise("Can't get info from secondary node %s" %
3249
                     self.cfg.GetNodeName(node_uuid), prereq=True,
3250
                     ecode=errors.ECODE_STATE)
3251
          (_, _, (nhvinfo, )) = nres.payload
3252
          if not isinstance(nhvinfo.get("memory_free", None), int):
3253
            raise errors.OpPrereqError("Secondary node %s didn't return free"
3254
                                       " memory information" %
3255
                                       self.cfg.GetNodeName(node_uuid),
3256
                                       errors.ECODE_STATE)
3257
          #TODO(dynmem): do the appropriate check involving MINMEM
3258
          elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
3259
            raise errors.OpPrereqError("This change will prevent the instance"
3260
                                       " from failover to its secondary node"
3261
                                       " %s, due to not enough memory" %
3262
                                       self.cfg.GetNodeName(node_uuid),
3263
                                       errors.ECODE_STATE)
3264

    
3265
    if self.op.runtime_mem:
3266
      remote_info = self.rpc.call_instance_info(
3267
         self.instance.primary_node, self.instance.name,
3268
         self.instance.hypervisor,
3269
         cluster_hvparams)
3270
      remote_info.Raise("Error checking node %s" %
3271
                        self.cfg.GetNodeName(self.instance.primary_node))
3272
      if not remote_info.payload: # not running already
3273
        raise errors.OpPrereqError("Instance %s is not running" %
3274
                                   self.instance.name, errors.ECODE_STATE)
3275

    
3276
      current_memory = remote_info.payload["memory"]
3277
      if (not self.op.force and
3278
           (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
3279
            self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
3280
        raise errors.OpPrereqError("Instance %s must have memory between %d"
3281
                                   " and %d MB of memory unless --force is"
3282
                                   " given" %
3283
                                   (self.instance.name,
3284
                                    self.be_proposed[constants.BE_MINMEM],
3285
                                    self.be_proposed[constants.BE_MAXMEM]),
3286
                                   errors.ECODE_INVAL)
3287

    
3288
      delta = self.op.runtime_mem - current_memory
3289
      if delta > 0:
3290
        CheckNodeFreeMemory(
3291
            self, self.instance.primary_node,
3292
            "ballooning memory for instance %s" % self.instance.name, delta,
3293
            self.instance.hypervisor,
3294
            self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])
3295

    
3296
    # make self.cluster visible in the functions below
3297
    cluster = self.cluster
3298

    
3299
    def _PrepareNicCreate(_, params, private):
3300
      self._PrepareNicModification(params, private, None, None,
3301
                                   {}, cluster, pnode_uuid)
3302
      return (None, None)
3303

    
3304
    def _PrepareNicMod(_, nic, params, private):
3305
      self._PrepareNicModification(params, private, nic.ip, nic.network,
3306
                                   nic.nicparams, cluster, pnode_uuid)
3307
      return None
3308

    
3309
    def _PrepareNicRemove(_, params, __):
3310
      ip = params.ip
3311
      net = params.network
3312
      if net is not None and ip is not None:
3313
        self.cfg.ReleaseIp(net, ip, self.proc.GetECId())
3314

    
3315
    # Verify NIC changes (operating on copy)
3316
    nics = [nic.Copy() for nic in self.instance.nics]
3317
    _ApplyContainerMods("NIC", nics, None, self.nicmod,
3318
                        _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
3319
    if len(nics) > constants.MAX_NICS:
3320
      raise errors.OpPrereqError("Instance has too many network interfaces"
3321
                                 " (%d), cannot add more" % constants.MAX_NICS,
3322
                                 errors.ECODE_STATE)
3323

    
3324
    # Pre-compute NIC changes (necessary to use result in hooks)
3325
    self._nic_chgdesc = []
3326
    if self.nicmod:
3327
      # Operate on copies as this is still in prereq
3328
      nics = [nic.Copy() for nic in self.instance.nics]
3329
      _ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
3330
                          self._CreateNewNic, self._ApplyNicMods,
3331
                          self._RemoveNic)
3332
      # Verify that NIC names are unique and valid
3333
      utils.ValidateDeviceNames("NIC", nics)
3334
      self._new_nics = nics
3335
      ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
3336
    else:
3337
      self._new_nics = None
3338
      ispec[constants.ISPEC_NIC_COUNT] = len(self.instance.nics)
3339

    
3340
    if not self.op.ignore_ipolicy:
3341
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
3342
                                                              group_info)
3343

    
3344
      # Fill ispec with backend parameters
3345
      ispec[constants.ISPEC_SPINDLE_USE] = \
3346
        self.be_new.get(constants.BE_SPINDLE_USE, None)
3347
      ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS,
3348
                                                         None)
3349

    
3350
      # Copy ispec to verify parameters with min/max values separately
3351
      if self.op.disk_template:
3352
        new_disk_template = self.op.disk_template
3353
      else:
3354
        new_disk_template = self.instance.disk_template
3355
      ispec_max = ispec.copy()
3356
      ispec_max[constants.ISPEC_MEM_SIZE] = \
3357
        self.be_new.get(constants.BE_MAXMEM, None)
3358
      res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,
3359
                                                     new_disk_template)
3360
      ispec_min = ispec.copy()
3361
      ispec_min[constants.ISPEC_MEM_SIZE] = \
3362
        self.be_new.get(constants.BE_MINMEM, None)
3363
      res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,
3364
                                                     new_disk_template)
3365

    
3366
      if (res_max or res_min):
3367
        # FIXME: Improve error message by including information about whether
3368
        # the upper or lower limit of the parameter fails the ipolicy.
3369
        msg = ("Instance allocation to group %s (%s) violates policy: %s" %
3370
               (group_info, group_info.name,
3371
                utils.CommaJoin(set(res_max + res_min))))
3372
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
3373

    
3374
  def _ConvertPlainToDrbd(self, feedback_fn):
3375
    """Converts an instance from plain to drbd.
3376

3377
    """
3378
    feedback_fn("Converting template to drbd")
3379
    pnode_uuid = self.instance.primary_node
3380
    snode_uuid = self.op.remote_node_uuid
3381

    
3382
    assert self.instance.disk_template == constants.DT_PLAIN
3383

    
3384
    # create a fake disk info for _GenerateDiskTemplate
3385
    disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
3386
                  constants.IDISK_VG: d.logical_id[0],
3387
                  constants.IDISK_NAME: d.name}
3388
                 for d in self.instance.disks]
3389
    new_disks = GenerateDiskTemplate(self, self.op.disk_template,
3390
                                     self.instance.uuid, pnode_uuid,
3391
                                     [snode_uuid], disk_info, None, None, 0,
3392
                                     feedback_fn, self.diskparams)
3393
    anno_disks = rpc.AnnotateDiskParams(new_disks, self.diskparams)
3394
    p_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, pnode_uuid)
3395
    s_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, snode_uuid)
3396
    info = GetInstanceInfoText(self.instance)
3397
    feedback_fn("Creating additional volumes...")
3398
    # first, create the missing data and meta devices
3399
    for disk in anno_disks:
3400
      # unfortunately this is... not too nice
3401
      CreateSingleBlockDev(self, pnode_uuid, self.instance, disk.children[1],
3402
                           info, True, p_excl_stor)
3403
      for child in disk.children:
3404
        CreateSingleBlockDev(self, snode_uuid, self.instance, child, info, True,
3405
                             s_excl_stor)
3406
    # at this stage, all new LVs have been created, we can rename the
3407
    # old ones
3408
    feedback_fn("Renaming original volumes...")
3409
    rename_list = [(o, n.children[0].logical_id)
3410
                   for (o, n) in zip(self.instance.disks, new_disks)]
3411
    result = self.rpc.call_blockdev_rename(pnode_uuid, rename_list)
3412
    result.Raise("Failed to rename original LVs")
3413

    
3414
    feedback_fn("Initializing DRBD devices...")
3415
    # all child devices are in place, we can now create the DRBD devices
3416
    try:
3417
      for disk in anno_disks:
3418
        for (node_uuid, excl_stor) in [(pnode_uuid, p_excl_stor),
3419
                                       (snode_uuid, s_excl_stor)]:
3420
          f_create = node_uuid == pnode_uuid
3421
          CreateSingleBlockDev(self, node_uuid, self.instance, disk, info,
3422
                               f_create, excl_stor)
3423
    except errors.GenericError, e:
3424
      feedback_fn("Initializing of DRBD devices failed;"
3425
                  " renaming back original volumes...")
3426
      rename_back_list = [(n.children[0], o.logical_id)
3427
                          for (n, o) in zip(new_disks, self.instance.disks)]
3428
      result = self.rpc.call_blockdev_rename(pnode_uuid, rename_back_list)
3429
      result.Raise("Failed to rename LVs back after error %s" % str(e))
3430
      raise
3431

    
3432
    # at this point, the instance has been modified
3433
    self.instance.disk_template = constants.DT_DRBD8
3434
    self.instance.disks = new_disks
3435
    self.cfg.Update(self.instance, feedback_fn)
3436

    
3437
    # Release node locks while waiting for sync
3438
    ReleaseLocks(self, locking.LEVEL_NODE)
3439

    
3440
    # disks are created, waiting for sync
3441
    disk_abort = not WaitForSync(self, self.instance,
3442
                                 oneshot=not self.op.wait_for_sync)
3443
    if disk_abort:
3444
      raise errors.OpExecError("There are some degraded disks for"
3445
                               " this instance, please cleanup manually")
3446

    
3447
    # Node resource locks will be released by caller
3448
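    # Summary of the conversion above (descriptive only, no extra logic):
    # each plain LV becomes the data child of a new DRBD8 disk; the missing
    # data/meta volumes are created on both nodes, the original LVs are
    # renamed into place, the DRBD devices are assembled, and only then is
    # the configuration switched to constants.DT_DRBD8, so an error before
    # that point is rolled back by renaming the volumes back.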

    
3449
  def _ConvertDrbdToPlain(self, feedback_fn):
3450
    """Converts an instance from drbd to plain.
3451

3452
    """
3453
    assert len(self.instance.secondary_nodes) == 1
3454
    assert self.instance.disk_template == constants.DT_DRBD8
3455

    
3456
    pnode_uuid = self.instance.primary_node
3457
    snode_uuid = self.instance.secondary_nodes[0]
3458
    feedback_fn("Converting template to plain")
3459

    
3460
    old_disks = AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
3461
    new_disks = [d.children[0] for d in self.instance.disks]
3462

    
3463
    # copy over size, mode and name
3464
    for parent, child in zip(old_disks, new_disks):
3465
      child.size = parent.size
3466
      child.mode = parent.mode
3467
      child.name = parent.name
3468

    
3469
    # this is a DRBD disk, return its port to the pool
3470
    # NOTE: this must be done right before the call to cfg.Update!
3471
    for disk in old_disks:
3472
      tcp_port = disk.logical_id[2]
3473
      self.cfg.AddTcpUdpPort(tcp_port)
3474

    
3475
    # update instance structure
3476
    self.instance.disks = new_disks
3477
    self.instance.disk_template = constants.DT_PLAIN
3478
    _UpdateIvNames(0, self.instance.disks)
3479
    self.cfg.Update(self.instance, feedback_fn)
3480

    
3481
    # Release locks in case removing disks takes a while
3482
    ReleaseLocks(self, locking.LEVEL_NODE)
3483

    
3484
    feedback_fn("Removing volumes on the secondary node...")
3485
    for disk in old_disks:
3486
      result = self.rpc.call_blockdev_remove(snode_uuid, (disk, self.instance))
3487
      result.Warn("Could not remove block device %s on node %s,"
3488
                  " continuing anyway" %
3489
                  (disk.iv_name, self.cfg.GetNodeName(snode_uuid)),
3490
                  self.LogWarning)
3491

    
3492
    feedback_fn("Removing unneeded volumes on the primary node...")
3493
    for idx, disk in enumerate(old_disks):
3494
      meta = disk.children[1]
3495
      result = self.rpc.call_blockdev_remove(pnode_uuid, (meta, self.instance))
3496
      result.Warn("Could not remove metadata for disk %d on node %s,"
3497
                  " continuing anyway" %
3498
                  (idx, self.cfg.GetNodeName(pnode_uuid)),
3499
                  self.LogWarning)
3500

    
3501
  def _HotplugDevice(self, action, dev_type, device, extra, seq):
3502
    self.LogInfo("Trying to hotplug device...")
3503
    msg = "hotplug:"
3504
    result = self.rpc.call_hotplug_device(self.instance.primary_node,
3505
                                          self.instance, action, dev_type,
3506
                                          (device, self.instance),
3507
                                          extra, seq)
3508
    if result.fail_msg:
3509
      self.LogWarning("Could not hotplug device: %s" % result.fail_msg)
3510
      self.LogInfo("Continuing execution..")
3511
      msg += "failed"
3512
    else:
3513
      self.LogInfo("Hotplug done.")
3514
      msg += "done"
3515
    return msg
3516
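  # Note (sketch): the string returned above is used as a per-device change
  # description, so callers end up recording entries such as
  #   ("disk/0", "hotplug:done")  or  ("nic.1", "hotplug:failed")
  # depending on whether the hotplug RPC succeeded.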

    
3517
  def _CreateNewDisk(self, idx, params, _):
3518
    """Creates a new disk.
3519

3520
    """
3521
    # add a new disk
3522
    if self.instance.disk_template in constants.DTS_FILEBASED:
3523
      (file_driver, file_path) = self.instance.disks[0].logical_id
3524
      file_path = os.path.dirname(file_path)
3525
    else:
3526
      file_driver = file_path = None
3527

    
3528
    disk = \
3529
      GenerateDiskTemplate(self, self.instance.disk_template,
3530
                           self.instance.uuid, self.instance.primary_node,
3531
                           self.instance.secondary_nodes, [params], file_path,
3532
                           file_driver, idx, self.Log, self.diskparams)[0]
3533

    
3534
    new_disks = CreateDisks(self, self.instance, disks=[disk])
3535

    
3536
    if self.cluster.prealloc_wipe_disks:
3537
      # Wipe new disk
3538
      WipeOrCleanupDisks(self, self.instance,
3539
                         disks=[(idx, disk, 0)],
3540
                         cleanup=new_disks)
3541

    
3542
    changes = [
3543
      ("disk/%d" % idx,
3544
       "add:size=%s,mode=%s" % (disk.size, disk.mode)),
3545
      ]
3546
    if self.op.hotplug:
3547
      result = self.rpc.call_blockdev_assemble(self.instance.primary_node,
3548
                                               (disk, self.instance),
3549
                                               self.instance.name, True, idx)
3550
      if result.fail_msg:
3551
        changes.append(("disk/%d" % idx, "assemble:failed"))
3552
        self.LogWarning("Can't assemble newly created disk %d: %s",
3553
                        idx, result.fail_msg)
3554
      else:
3555
        _, link_name = result.payload
3556
        msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
3557
                                  constants.HOTPLUG_TARGET_DISK,
3558
                                  disk, link_name, idx)
3559
        changes.append(("disk/%d" % idx, msg))
3560

    
3561
    return (disk, changes)
3562

    
3563
  def _PostAddDisk(self, _, disk):
3564
    if not WaitForSync(self, self.instance, disks=[disk],
3565
                       oneshot=not self.op.wait_for_sync):
3566
      raise errors.OpExecError("Failed to sync disks of %s" %
3567
                               self.instance.name)
3568

    
3569
    # the disk is active at this point, so deactivate it if the instance disks
3570
    # are supposed to be inactive
3571
    if not self.instance.disks_active:
3572
      ShutdownInstanceDisks(self, self.instance, disks=[disk])
3573

    
3574
  def _ModifyDisk(self, idx, disk, params, _):
3575
    """Modifies a disk.
3576

3577
    """
3578
    changes = []
3579
    if constants.IDISK_MODE in params:
3580
      disk.mode = params.get(constants.IDISK_MODE)
3581
      changes.append(("disk.mode/%d" % idx, disk.mode))
3582

    
3583
    if constants.IDISK_NAME in params:
3584
      disk.name = params.get(constants.IDISK_NAME)
3585
      changes.append(("disk.name/%d" % idx, disk.name))
3586

    
3587
    # Modify arbitrary params in case instance template is ext
3588
    for key, value in params.iteritems():
3589
      if (key not in constants.MODIFIABLE_IDISK_PARAMS and
3590
          self.instance.disk_template == constants.DT_EXT):
3591
        # stolen from GetUpdatedParams: default means reset/delete
3592
        if value.lower() == constants.VALUE_DEFAULT:
3593
          try:
3594
            del disk.params[key]
3595
          except KeyError:
3596
            pass
3597
        else:
3598
          disk.params[key] = value
3599
        changes.append(("disk.params:%s/%d" % (key, idx), value))
3600

    
3601
    return changes
3602
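  # Illustrative outcome (values assumed): modifying disk 0 with
  #   {constants.IDISK_MODE: "ro", constants.IDISK_NAME: "scratch"}
  # yields changes == [("disk.mode/0", "ro"), ("disk.name/0", "scratch")];
  # for DT_EXT instances, arbitrary provider parameters are additionally
  # recorded as ("disk.params:<key>/0", <value>) entries.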

    
3603
  def _RemoveDisk(self, idx, root, _):
3604
    """Removes a disk.
3605

3606
    """
3607
    hotmsg = ""
3608
    if self.op.hotplug:
3609
      hotmsg = self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
3610
                                   constants.HOTPLUG_TARGET_DISK,
3611
                                   root, None, idx)
3612
      ShutdownInstanceDisks(self, self.instance, [root])
3613

    
3614
    (anno_disk,) = AnnotateDiskParams(self.instance, [root], self.cfg)
3615
    for node_uuid, disk in anno_disk.ComputeNodeTree(
3616
                             self.instance.primary_node):
3617
      msg = self.rpc.call_blockdev_remove(node_uuid, (disk, self.instance)) \
3618
              .fail_msg
3619
      if msg:
3620
        self.LogWarning("Could not remove disk/%d on node '%s': %s,"
3621
                        " continuing anyway", idx,
3622
                        self.cfg.GetNodeName(node_uuid), msg)
3623

    
3624
    # if this is a DRBD disk, return its port to the pool
3625
    if root.dev_type in constants.DTS_DRBD:
3626
      self.cfg.AddTcpUdpPort(root.logical_id[2])
3627

    
3628
    return hotmsg
3629

    
3630
  def _CreateNewNic(self, idx, params, private):
3631
    """Creates data structure for a new network interface.
3632

3633
    """
3634
    mac = params[constants.INIC_MAC]
3635
    ip = params.get(constants.INIC_IP, None)
3636
    net = params.get(constants.INIC_NETWORK, None)
3637
    name = params.get(constants.INIC_NAME, None)
3638
    net_uuid = self.cfg.LookupNetwork(net)
3639
    #TODO: not private.filled?? can a nic have no nicparams??
3640
    nicparams = private.filled
3641
    nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name,
3642
                       nicparams=nicparams)
3643
    nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
3644

    
3645
    changes = [
3646
      ("nic.%d" % idx,
3647
       "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
3648
       (mac, ip, private.filled[constants.NIC_MODE],
3649
       private.filled[constants.NIC_LINK], net)),
3650
      ]
3651

    
3652
    if self.op.hotplug:
3653
      msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
3654
                                constants.HOTPLUG_TARGET_NIC,
3655
                                nobj, None, idx)
3656
      changes.append(("nic.%d" % idx, msg))
3657

    
3658
    return (nobj, changes)
3659

    
3660
  def _ApplyNicMods(self, idx, nic, params, private):
3661
    """Modifies a network interface.
3662

3663
    """
3664
    changes = []
3665

    
3666
    for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:
3667
      if key in params:
3668
        changes.append(("nic.%s/%d" % (key, idx), params[key]))
3669
        setattr(nic, key, params[key])
3670

    
3671
    new_net = params.get(constants.INIC_NETWORK, nic.network)
3672
    new_net_uuid = self.cfg.LookupNetwork(new_net)
3673
    if new_net_uuid != nic.network:
3674
      changes.append(("nic.network/%d" % idx, new_net))
3675
      nic.network = new_net_uuid
3676

    
3677
    if private.filled:
      nic.nicparams = private.filled

      for (key, val) in nic.nicparams.items():
        changes.append(("nic.%s/%d" % (key, idx), val))

    if self.op.hotplug:
      msg = self._HotplugDevice(constants.HOTPLUG_ACTION_MODIFY,
                                constants.HOTPLUG_TARGET_NIC,
                                nic, None, idx)
      changes.append(("nic/%d" % idx, msg))

    return changes

  def _RemoveNic(self, idx, nic, _):
    if self.op.hotplug:
      return self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
                                 constants.HOTPLUG_TARGET_NIC,
                                 nic, None, idx)

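  # Exec applies the already-validated modifications in a fixed order: new
  # primary node, runtime memory, disk changes, optional disk template
  # conversion, NIC changes, hypervisor/backend/OS parameters, admin state,
  # and finally a single configuration update.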
  def Exec(self, feedback_fn):
    """Modifies an instance.

    All parameters take effect only at the next restart of the instance.

    """
    # Process here the warnings from CheckPrereq, as we don't have a
    # feedback_fn there.
    # TODO: Replace with self.LogWarning
    for warn in self.warn:
      feedback_fn("WARNING: %s" % warn)

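    # Node resource locks are only acquired when a disk template conversion
    # was requested, so exactly one of "no template change requested" and
    # "node resource locks held" must be true at this point.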
    assert ((self.op.disk_template is None) ^
            bool(self.owned_locks(locking.LEVEL_NODE_RES))), \
      "Not owning any node resource locks"

    result = []

    # New primary node
    if self.op.pnode_uuid:
      self.instance.primary_node = self.op.pnode_uuid

    # runtime memory
    if self.op.runtime_mem:
      rpcres = self.rpc.call_instance_balloon_memory(self.instance.primary_node,
                                                     self.instance,
                                                     self.op.runtime_mem)
      rpcres.Raise("Cannot modify instance runtime memory")
      result.append(("runtime_memory", self.op.runtime_mem))

    # Apply disk changes
    _ApplyContainerMods("disk", self.instance.disks, result, self.diskmod,
                        self._CreateNewDisk, self._ModifyDisk,
                        self._RemoveDisk, post_add_fn=self._PostAddDisk)
    _UpdateIvNames(0, self.instance.disks)

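    # Disk template conversion: the instance's disks must be fully shut down
    # first, then the matching helper from _DISK_CONVERSIONS performs the
    # actual conversion.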
    if self.op.disk_template:
      if __debug__:
        check_nodes = set(self.instance.all_nodes)
        if self.op.remote_node_uuid:
          check_nodes.add(self.op.remote_node_uuid)
        for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
          owned = self.owned_locks(level)
          assert not (check_nodes - owned), \
            ("Not owning the correct locks, owning %r, expected at least %r" %
             (owned, check_nodes))

      r_shut = ShutdownInstanceDisks(self, self.instance)
      if not r_shut:
        raise errors.OpExecError("Cannot shutdown instance disks, unable to"
                                 " proceed with disk template conversion")
      mode = (self.instance.disk_template, self.op.disk_template)
      try:
        self._DISK_CONVERSIONS[mode](self, feedback_fn)
      except:
        self.cfg.ReleaseDRBDMinors(self.instance.uuid)
        raise
      result.append(("disk_template", self.op.disk_template))

      assert self.instance.disk_template == self.op.disk_template, \
        ("Expected disk template '%s', found '%s'" %
         (self.op.disk_template, self.instance.disk_template))

    # Release node and resource locks if there are any (they might already have
    # been released during disk conversion)
    ReleaseLocks(self, locking.LEVEL_NODE)
    ReleaseLocks(self, locking.LEVEL_NODE_RES)

    # Apply NIC changes
    if self._new_nics is not None:
      self.instance.nics = self._new_nics
      result.extend(self._nic_chgdesc)

    # hvparams changes
    if self.op.hvparams:
      self.instance.hvparams = self.hv_inst
      for key, val in self.op.hvparams.iteritems():
        result.append(("hv/%s" % key, val))

    # beparams changes
    if self.op.beparams:
      self.instance.beparams = self.be_inst
      for key, val in self.op.beparams.iteritems():
        result.append(("be/%s" % key, val))

    # OS change
    if self.op.os_name:
      self.instance.os = self.op.os_name

    # osparams changes
    if self.op.osparams:
      self.instance.osparams = self.os_inst
      for key, val in self.op.osparams.iteritems():
        result.append(("os/%s" % key, val))

    if self.op.osparams_private:
      self.instance.osparams_private = self.os_inst_private
      for key, val in self.op.osparams_private.iteritems():
        # Show the Private(...) blurb.
        result.append(("os_private/%s" % key, repr(val)))

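    # Requested administrative state: None leaves it unchanged, True marks
    # the instance offline, False brings it back to online (but stopped).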
    if self.op.offline is None:
      # Ignore
      pass
    elif self.op.offline:
      # Mark instance as offline
      self.cfg.MarkInstanceOffline(self.instance.uuid)
      result.append(("admin_state", constants.ADMINST_OFFLINE))
    else:
      # Mark instance as online, but stopped
      self.cfg.MarkInstanceDown(self.instance.uuid)
      result.append(("admin_state", constants.ADMINST_DOWN))

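    # Persist all accumulated changes to the cluster configuration in one go.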
    self.cfg.Update(self.instance, feedback_fn, self.proc.GetECId())

    assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
                self.owned_locks(locking.LEVEL_NODE)), \
      "All node locks should have been released by now"

    return result

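  # Supported (source template, target template) conversions and the helper
  # implementing each of them.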
  _DISK_CONVERSIONS = {
    (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
    (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
    }


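# LUInstanceChangeGroup moves an instance to a different node group: it asks
# the configured iallocator for a solution and returns the resulting jobs for
# submission instead of performing the moves itself.  It typically backs a
# command along the lines of "gnt-instance change-group --to <group> <name>"
# (the exact CLI flags depend on the Ganeti version).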
class LUInstanceChangeGroup(LogicalUnit):
  HPATH = "instance-change-group"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self.share_locks = ShareAll()

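    # Group and node locks cannot be determined yet, so they are declared
    # empty here and filled in per level in DeclareLocks below.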
    self.needed_locks = {
      locking.LEVEL_NODEGROUP: [],
      locking.LEVEL_NODE: [],
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
      }

    self._ExpandAndLockInstance()

    if self.op.target_groups:
      self.req_target_uuids = map(self.cfg.LookupNodeGroup,
                                  self.op.target_groups)
    else:
      self.req_target_uuids = None

    self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]

      if self.req_target_uuids:
        lock_groups = set(self.req_target_uuids)

        # Lock all groups used by instance optimistically; this requires going
        # via the node before it's locked, requiring verification later on
        instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
        lock_groups.update(instance_groups)
      else:
        # No target groups, need to lock all of them
        lock_groups = locking.ALL_SET

      self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups

    elif level == locking.LEVEL_NODE:
      if self.req_target_uuids:
        # Lock all nodes used by instances
        self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
        self._LockInstancesNodes()

        # Lock all nodes in all potential target groups
        lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
                       self.cfg.GetInstanceNodeGroups(self.op.instance_uuid))
        member_nodes = [node_uuid
                        for group in lock_groups
                        for node_uuid in self.cfg.GetNodeGroup(group).members]
        self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
      else:
        # Lock all nodes as all groups are potential targets
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

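  # With all locks held, re-verify the optimistic assumptions made while
  # locking and compute the final set of candidate target groups.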
  def CheckPrereq(self):
    owned_instance_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))

    assert (self.req_target_uuids is None or
            owned_groups.issuperset(self.req_target_uuids))
    assert owned_instance_names == set([self.op.instance_name])

    # Get instance information
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)

    # Check if node groups for locked instance are still correct
    assert owned_nodes.issuperset(self.instance.all_nodes), \
      ("Instance %s's nodes changed while we kept the lock" %
       self.op.instance_name)

    inst_groups = CheckInstanceNodeGroups(self.cfg, self.op.instance_uuid,
                                          owned_groups)

    if self.req_target_uuids:
      # User requested specific target groups
      self.target_uuids = frozenset(self.req_target_uuids)
    else:
      # All groups except those used by the instance are potential targets
      self.target_uuids = owned_groups - inst_groups

    conflicting_groups = self.target_uuids & inst_groups
    if conflicting_groups:
      raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
                                 " used by the instance '%s'" %
                                 (utils.CommaJoin(conflicting_groups),
                                  self.op.instance_name),
                                 errors.ECODE_INVAL)

    if not self.target_uuids:
      raise errors.OpPrereqError("There are no possible target groups",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    assert self.target_uuids

    env = {
      "TARGET_GROUPS": " ".join(self.target_uuids),
      }

    env.update(BuildInstanceHookEnvByObject(self, self.instance))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()
    return ([mn], [mn])

  def Exec(self, feedback_fn):
    instances = list(self.owned_locks(locking.LEVEL_INSTANCE))

    assert instances == [self.op.instance_name], "Instance not locked"

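    # Ask the instance allocator for a "change group" solution; this request
    # type reuses the node-evacuation result format, hence the use of
    # LoadNodeEvacResult further down.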
    req = iallocator.IAReqGroupChange(instances=instances,
                                      target_groups=list(self.target_uuids))
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute solution for changing group of"
                                 " instance '%s' using iallocator '%s': %s" %
                                 (self.op.instance_name, self.op.iallocator,
                                  ial.info), errors.ECODE_NORES)

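    # Convert the allocator's answer into job definitions; they are returned
    # for submission rather than executed within this LU.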
    jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False)

    self.LogInfo("Iallocator returned %s job(s) for changing group of"
                 " instance '%s'", len(jobs), self.op.instance_name)

    return ResultWithJobs(jobs)