lib/cmdlib/instance.py @ 87ed6b79
#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Logical units dealing with instances."""

import OpenSSL
import copy
import logging
import os

from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import ht
from ganeti import hypervisor
from ganeti import locking
from ganeti.masterd import iallocator
from ganeti import masterd
from ganeti import netutils
from ganeti import objects
from ganeti import pathutils
from ganeti import serializer
from ganeti import ssh
import ganeti.rpc.node as rpc
from ganeti import utils

from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs

from ganeti.cmdlib.common import INSTANCE_DOWN, \
  INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, CheckNodeOnline, \
  ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
  LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
  IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, CheckOSImage, \
  AnnotateDiskParams, GetUpdatedParams, ExpandInstanceUuidAndName, \
  ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeUuidAndName, \
  CheckDiskTemplateEnabled, IsValidDiskAccessModeCombination
from ganeti.cmdlib.instance_storage import CreateDisks, \
  CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, ImageDisks, \
  WaitForSync, IsExclusiveStorageEnabledNodeUuid, CreateSingleBlockDev, \
  ComputeDisks, CheckRADOSFreeSpace, ComputeDiskSizePerVG, \
  GenerateDiskTemplate, StartInstanceDisks, ShutdownInstanceDisks, \
  AssembleInstanceDisks, CheckSpindlesExclusiveStorage
from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
  GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
  NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
  ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
  GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
  CheckInstanceBridgesExist, CheckNicsBridgesExist, CheckNodeHasOS

import ganeti.masterd.instance


#: Type description for changes as returned by L{_ApplyContainerMods}'s
#: callbacks
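#: (i.e. an optional list of 2-tuples, each with a non-empty description
#: string and the corresponding new value)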
_TApplyContModsCbChanges = \
  ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
    ht.TNonEmptyString,
    ht.TAny,
    ])))


def _CheckHostnameSane(lu, name):
  """Ensures that a given hostname resolves to a 'sane' name.

  The given name is required to be a prefix of the resolved hostname,
  to prevent accidental mismatches.

  @param lu: the logical unit on behalf of which we're checking
  @param name: the name we should resolve and check
  @return: the resolved hostname object

  """
  hostname = netutils.GetHostname(name=name)
  if hostname.name != name:
    lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name)
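  # E.g. "web1" resolving to "web1.example.com" is accepted by the check
  # below, while "web1" resolving to "mail.example.com" is rejected.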
  if not utils.MatchNameComponent(name, [hostname.name]):
    raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
                                " same as given hostname '%s'") %
                               (hostname.name, name), errors.ECODE_INVAL)
  return hostname


def _CheckOpportunisticLocking(op):
  """Generate error if opportunistic locking is not possible.

  """
  if op.opportunistic_locking and not op.iallocator:
    raise errors.OpPrereqError("Opportunistic locking is only available in"
                               " combination with an instance allocator",
                               errors.ECODE_INVAL)


def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_name_whitelist):
  """Wrapper around IAReqInstanceAlloc.

  @param op: The instance opcode
  @param disks: The computed disks
  @param nics: The computed nics
  @param beparams: The fully filled beparams
  @param node_name_whitelist: List of nodes which should appear as online to the
    allocator (unless the node is already marked offline)

  @returns: A filled L{iallocator.IAReqInstanceAlloc}

  """
  spindle_use = beparams[constants.BE_SPINDLE_USE]
  return iallocator.IAReqInstanceAlloc(name=op.instance_name,
                                       disk_template=op.disk_template,
                                       tags=op.tags,
                                       os=op.os_type,
                                       vcpus=beparams[constants.BE_VCPUS],
                                       memory=beparams[constants.BE_MAXMEM],
                                       spindle_use=spindle_use,
                                       disks=disks,
                                       nics=[n.ToDict() for n in nics],
                                       hypervisor=op.hypervisor,
                                       node_whitelist=node_name_whitelist)


def _ComputeFullBeParams(op, cluster):
  """Computes the full beparams.

  @param op: The instance opcode
  @param cluster: The cluster config object

  @return: The fully filled beparams

  """
  default_beparams = cluster.beparams[constants.PP_DEFAULT]
  for param, value in op.beparams.iteritems():
    if value == constants.VALUE_AUTO:
      op.beparams[param] = default_beparams[param]
  objects.UpgradeBeParams(op.beparams)
  utils.ForceDictType(op.beparams, constants.BES_PARAMETER_TYPES)
  return cluster.SimpleFillBE(op.beparams)


def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
  """Computes the nics.

  @param op: The instance opcode
  @param cluster: Cluster configuration object
  @param default_ip: The default ip to assign
  @param cfg: An instance of the configuration object
  @param ec_id: Execution context ID

  @returns: The built-up nics

  """
  nics = []
  for nic in op.nics:
    nic_mode_req = nic.get(constants.INIC_MODE, None)
    nic_mode = nic_mode_req
    if nic_mode is None or nic_mode == constants.VALUE_AUTO:
      nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]

    net = nic.get(constants.INIC_NETWORK, None)
    link = nic.get(constants.NIC_LINK, None)
    ip = nic.get(constants.INIC_IP, None)
    vlan = nic.get(constants.INIC_VLAN, None)

    if net is None or net.lower() == constants.VALUE_NONE:
      net = None
    else:
      if nic_mode_req is not None or link is not None:
        raise errors.OpPrereqError("If network is given, no mode or link"
                                   " is allowed to be passed",
                                   errors.ECODE_INVAL)

    # ip validity checks
    if ip is None or ip.lower() == constants.VALUE_NONE:
      nic_ip = None
    elif ip.lower() == constants.VALUE_AUTO:
      if not op.name_check:
        raise errors.OpPrereqError("IP address set to auto but name checks"
                                   " have been skipped",
                                   errors.ECODE_INVAL)
      nic_ip = default_ip
    else:
      # We defer pool operations until later, so that the iallocator has
      # filled in the instance's node(s)
      if ip.lower() == constants.NIC_IP_POOL:
        if net is None:
          raise errors.OpPrereqError("if ip=pool, parameter network"
                                     " must be passed too",
                                     errors.ECODE_INVAL)

      elif not netutils.IPAddress.IsValid(ip):
        raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                   errors.ECODE_INVAL)

      nic_ip = ip

    # TODO: check the ip address for uniqueness
    if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
      raise errors.OpPrereqError("Routed nic mode requires an ip address",
                                 errors.ECODE_INVAL)

    # MAC address verification
    mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
    if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
      mac = utils.NormalizeAndValidateMac(mac)

      try:
        # TODO: We need to factor this out
        cfg.ReserveMAC(mac, ec_id)
      except errors.ReservationError:
        raise errors.OpPrereqError("MAC address %s already in use"
                                   " in cluster" % mac,
                                   errors.ECODE_NOTUNIQUE)

    #  Build nic parameters
    nicparams = {}
    if nic_mode_req:
      nicparams[constants.NIC_MODE] = nic_mode
    if link:
      nicparams[constants.NIC_LINK] = link
    if vlan:
      nicparams[constants.NIC_VLAN] = vlan

    check_params = cluster.SimpleFillNIC(nicparams)
    objects.NIC.CheckParameterSyntax(check_params)
    net_uuid = cfg.LookupNetwork(net)
    name = nic.get(constants.INIC_NAME, None)
    if name is not None and name.lower() == constants.VALUE_NONE:
      name = None
    nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name,
                          network=net_uuid, nicparams=nicparams)
    nic_obj.uuid = cfg.GenerateUniqueID(ec_id)
    nics.append(nic_obj)

  return nics


def _CheckForConflictingIp(lu, ip, node_uuid):
  """In case of conflicting IP address raise error.

  @type ip: string
  @param ip: IP address
  @type node_uuid: string
  @param node_uuid: node UUID

  """
  (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node_uuid)
  if conf_net is not None:
    raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
                                " network %s, but the target NIC does not." %
                                (ip, conf_net)),
                               errors.ECODE_STATE)

  return (None, None)


def _ComputeIPolicyInstanceSpecViolation(
  ipolicy, instance_spec, disk_template,
  _compute_fn=ComputeIPolicySpecViolation):
  """Compute if instance specs meet the specs of ipolicy.

  @type ipolicy: dict
  @param ipolicy: The ipolicy to verify against
  @type instance_spec: dict
  @param instance_spec: The instance spec to verify
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{ComputeIPolicySpecViolation}

  """
  mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
  cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
  disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
  disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
  nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
  spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)

  return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
                     disk_sizes, spindle_use, disk_template)


def _ComputeInstanceCommunicationNIC(instance_name):
  """Compute the name of the instance NIC used by instance
  communication.

  With instance communication, a new NIC is added to the instance.
  This NIC has a special name that identifies it as being part of
  instance communication, and not just a normal NIC.  This function
  generates the name of the NIC based on a prefix and the instance
  name.

  @type instance_name: string
  @param instance_name: name of the instance the NIC belongs to

  @rtype: string
  @return: name of the NIC

  """
  return constants.INSTANCE_COMMUNICATION_NIC_PREFIX + instance_name


class LUInstanceCreate(LogicalUnit):
  """Create an instance.

  """
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def _CheckDiskTemplateValid(self):
    """Checks validity of disk template.

    """
    cluster = self.cfg.GetClusterInfo()
    if self.op.disk_template is None:
      # FIXME: It would be better to take the default disk template from the
      # ipolicy, but for the ipolicy we need the primary node, which we get from
      # the iallocator, which wants the disk template as input. To solve this
      # chicken-and-egg problem, it should be possible to specify just a node
      # group from the iallocator and take the ipolicy from that.
      self.op.disk_template = cluster.enabled_disk_templates[0]
    CheckDiskTemplateEnabled(cluster, self.op.disk_template)

  def _CheckDiskArguments(self):
    """Checks validity of disk-related arguments.

    """
    # check that the disks' names are unique and valid
    utils.ValidateDeviceNames("disk", self.op.disks)

    self._CheckDiskTemplateValid()

    # check disks' parameter names and consistent adopt/no-adopt strategy
    has_adopt = has_no_adopt = False
    for disk in self.op.disks:
      if self.op.disk_template != constants.DT_EXT:
        utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
      if constants.IDISK_ADOPT in disk:
        has_adopt = True
      else:
        has_no_adopt = True
    if has_adopt and has_no_adopt:
      raise errors.OpPrereqError("Either all disks are adopted or none is",
                                 errors.ECODE_INVAL)
    if has_adopt:
      if self.op.disk_template not in constants.DTS_MAY_ADOPT:
        raise errors.OpPrereqError("Disk adoption is not supported for the"
                                   " '%s' disk template" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)
      if self.op.iallocator is not None:
        raise errors.OpPrereqError("Disk adoption not allowed with an"
                                   " iallocator script", errors.ECODE_INVAL)
      if self.op.mode == constants.INSTANCE_IMPORT:
        raise errors.OpPrereqError("Disk adoption not allowed for"
                                   " instance import", errors.ECODE_INVAL)
    else:
      if self.op.disk_template in constants.DTS_MUST_ADOPT:
        raise errors.OpPrereqError("Disk template %s requires disk adoption,"
                                   " but no 'adopt' parameter given" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)

    self.adopt_disks = has_adopt

  def _CheckVLANArguments(self):
    """ Check validity of VLANs if given

    """
    for nic in self.op.nics:
      vlan = nic.get(constants.INIC_VLAN, None)
      if vlan:
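        # Accepted forms, as handled below: ".<id>" for a single untagged
        # VLAN (optionally followed by a ":"-separated trunk),
        # ":<id>[:<id>...]" for a tagged-only trunk, and a bare "<id>",
        # which is normalized to ".<id>".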
        if vlan[0] == ".":
          # vlan starting with dot means single untagged vlan,
          # might be followed by trunk (:)
          if not vlan[1:].isdigit():
            vlanlist = vlan[1:].split(':')
            for vl in vlanlist:
              if not vl.isdigit():
                raise errors.OpPrereqError("Specified VLAN parameter is "
                                           "invalid : %s" % vlan,
                                           errors.ECODE_INVAL)
        elif vlan[0] == ":":
          # Trunk - tagged only
          vlanlist = vlan[1:].split(':')
          for vl in vlanlist:
            if not vl.isdigit():
              raise errors.OpPrereqError("Specified VLAN parameter is invalid"
                                         " : %s" % vlan, errors.ECODE_INVAL)
        elif vlan.isdigit():
          # This is the simplest case. No dots, only single digit
          # -> Create untagged access port, dot needs to be added
          nic[constants.INIC_VLAN] = "." + vlan
        else:
          raise errors.OpPrereqError("Specified VLAN parameter is invalid"
                                     " : %s" % vlan, errors.ECODE_INVAL)

  def CheckArguments(self):
    """Check arguments.

    """
    # do not require name_check to ease forward/backward compatibility
    # for tools
    if self.op.no_install and self.op.start:
      self.LogInfo("No-installation mode selected, disabling startup")
      self.op.start = False
    # validate/normalize the instance name
    self.op.instance_name = \
      netutils.Hostname.GetNormalizedName(self.op.instance_name)

    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("Cannot do IP address check without a name"
                                 " check", errors.ECODE_INVAL)

    # add NIC for instance communication
    if self.op.instance_communication:
      nic_name = _ComputeInstanceCommunicationNIC(self.op.instance_name)

      self.op.nics.append({constants.INIC_NAME: nic_name,
                           constants.INIC_MAC: constants.VALUE_GENERATE,
                           constants.INIC_IP: constants.NIC_IP_POOL,
                           constants.INIC_NETWORK:
                             self.cfg.GetInstanceCommunicationNetwork()})

    # check nics' parameter names
    for nic in self.op.nics:
      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
    # check that NICs' parameter names are unique and valid
    utils.ValidateDeviceNames("NIC", self.op.nics)

    self._CheckVLANArguments()

    self._CheckDiskArguments()
    assert self.op.disk_template is not None

    # instance name verification
    if self.op.name_check:
      self.hostname = _CheckHostnameSane(self, self.op.instance_name)
      self.op.instance_name = self.hostname.name
      # used in CheckPrereq for ip ping check
      self.check_ip = self.hostname.ip
    else:
      self.check_ip = None

    # file storage checks
    if (self.op.file_driver and
        not self.op.file_driver in constants.FILE_DRIVER):
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                 self.op.file_driver, errors.ECODE_INVAL)

    # set default file_driver if unset and required
    if (not self.op.file_driver and
        self.op.disk_template in constants.DTS_FILEBASED):
      self.op.file_driver = constants.FD_LOOP

    ### Node/iallocator related checks
    CheckIAllocatorOrNode(self, "iallocator", "pnode")

    if self.op.pnode is not None:
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        if self.op.snode is None:
          raise errors.OpPrereqError("The networked disk templates need"
                                     " a mirror node", errors.ECODE_INVAL)
      elif self.op.snode:
        self.LogWarning("Secondary node will be ignored on non-mirrored disk"
                        " template")
        self.op.snode = None

    _CheckOpportunisticLocking(self.op)

    if self.op.mode == constants.INSTANCE_IMPORT:
      # On import force_variant must be True, because if we forced it at
      # initial install, our only chance when importing it back is that it
      # works again!
      self.op.force_variant = True

      if self.op.no_install:
        self.LogInfo("No-installation mode has no effect during import")

      if objects.GetOSImage(self.op.osparams):
        self.LogInfo("OS image has no effect during import")
    elif self.op.mode == constants.INSTANCE_CREATE:
      os_image = CheckOSImage(self.op)

      if self.op.os_type is None and os_image is None:
        raise errors.OpPrereqError("No guest OS or OS image specified",
                                   errors.ECODE_INVAL)

      if self.op.os_type is not None \
            and self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
        raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
                                   " installation" % self.op.os_type,
                                   errors.ECODE_STATE)
    elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
      if objects.GetOSImage(self.op.osparams):
        self.LogInfo("OS image has no effect during import")

      self._cds = GetClusterDomainSecret()

      # Check handshake to ensure both clusters have the same domain secret
      src_handshake = self.op.source_handshake
      if not src_handshake:
        raise errors.OpPrereqError("Missing source handshake",
                                   errors.ECODE_INVAL)

      errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
                                                           src_handshake)
      if errmsg:
        raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
                                   errors.ECODE_INVAL)

      # Load and check source CA
      self.source_x509_ca_pem = self.op.source_x509_ca
      if not self.source_x509_ca_pem:
        raise errors.OpPrereqError("Missing source X509 CA",
                                   errors.ECODE_INVAL)

      try:
        (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
                                                    self._cds)
      except OpenSSL.crypto.Error, err:
        raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
                                   (err, ), errors.ECODE_INVAL)

      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
      if errcode is not None:
        raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
                                   errors.ECODE_INVAL)

      self.source_x509_ca = cert

      src_instance_name = self.op.source_instance_name
      if not src_instance_name:
        raise errors.OpPrereqError("Missing source instance name",
                                   errors.ECODE_INVAL)

      self.source_instance_name = \
        netutils.GetHostname(name=src_instance_name).name

    else:
      raise errors.OpPrereqError("Invalid instance creation mode %r" %
                                 self.op.mode, errors.ECODE_INVAL)

  def ExpandNames(self):
    """ExpandNames for CreateInstance.

    Figure out the right locks for instance creation.

    """
    self.needed_locks = {}

    # this is just a preventive check, but someone might still add this
    # instance in the meantime, and creation will fail at lock-add time
    if self.op.instance_name in\
      [inst.name for inst in self.cfg.GetAllInstancesInfo().values()]:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 self.op.instance_name, errors.ECODE_EXISTS)

    self.add_locks[locking.LEVEL_INSTANCE] = self.op.instance_name

    if self.op.iallocator:
      # TODO: Find a solution to not lock all nodes in the cluster, e.g. by
      # specifying a group on instance creation and then selecting nodes from
      # that group
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
      self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET

      if self.op.opportunistic_locking:
        self.opportunistic_locks[locking.LEVEL_NODE] = True
    else:
      (self.op.pnode_uuid, self.op.pnode) = \
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
      nodelist = [self.op.pnode_uuid]
      if self.op.snode is not None:
        (self.op.snode_uuid, self.op.snode) = \
          ExpandNodeUuidAndName(self.cfg, self.op.snode_uuid, self.op.snode)
        nodelist.append(self.op.snode_uuid)
      self.needed_locks[locking.LEVEL_NODE] = nodelist

    # in case of import lock the source node too
    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = self.op.src_node
      src_path = self.op.src_path

      if src_path is None:
        self.op.src_path = src_path = self.op.instance_name

      if src_node is None:
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
        self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
        self.op.src_node = None
        if os.path.isabs(src_path):
          raise errors.OpPrereqError("Importing an instance from a path"
                                     " requires a source node option",
                                     errors.ECODE_INVAL)
      else:
        (self.op.src_node_uuid, self.op.src_node) = (_, src_node) = \
          ExpandNodeUuidAndName(self.cfg, self.op.src_node_uuid, src_node)
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
          self.needed_locks[locking.LEVEL_NODE].append(self.op.src_node_uuid)
        if not os.path.isabs(src_path):
          self.op.src_path = \
            utils.PathJoin(pathutils.EXPORT_DIR, src_path)

    self.needed_locks[locking.LEVEL_NODE_RES] = \
      CopyLockList(self.needed_locks[locking.LEVEL_NODE])

    # Optimistically acquire shared group locks (we're reading the
    # configuration).  We can't just call GetInstanceNodeGroups, because the
    # instance doesn't exist yet. Therefore we lock all node groups of all
    # nodes we have.
    if self.needed_locks[locking.LEVEL_NODE] == locking.ALL_SET:
      # In the case we lock all nodes for opportunistic allocation, we have no
      # choice but to lock all groups, because they're allocated before nodes.
      # This is sad, but true. At least we release all those we don't need in
      # CheckPrereq later.
      self.needed_locks[locking.LEVEL_NODEGROUP] = locking.ALL_SET
    else:
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
        list(self.cfg.GetNodeGroupsFromNodes(
          self.needed_locks[locking.LEVEL_NODE]))
    self.share_locks[locking.LEVEL_NODEGROUP] = 1

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE_RES and \
      self.opportunistic_locks[locking.LEVEL_NODE]:
      # Even when using opportunistic locking, we require the same set of
      # NODE_RES locks as we got NODE locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        self.owned_locks(locking.LEVEL_NODE)

  def _RunAllocator(self):
    """Run the allocator based on input opcode.

    """
    if self.op.opportunistic_locking:
      # Only consider nodes for which a lock is held
      node_name_whitelist = self.cfg.GetNodeNames(
        self.owned_locks(locking.LEVEL_NODE))
    else:
      node_name_whitelist = None

    req = _CreateInstanceAllocRequest(self.op, self.disks,
                                      self.nics, self.be_full,
                                      node_name_whitelist)
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      # When opportunistic locks are used only a temporary failure is generated
      if self.op.opportunistic_locking:
        ecode = errors.ECODE_TEMP_NORES
      else:
        ecode = errors.ECODE_NORES

      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" %
                                 (self.op.iallocator, ial.info),
                                 ecode)

    (self.op.pnode_uuid, self.op.pnode) = \
      ExpandNodeUuidAndName(self.cfg, None, ial.result[0])
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                 self.op.instance_name, self.op.iallocator,
                 utils.CommaJoin(ial.result))

    assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator"

    if req.RequiredNodes() == 2:
      (self.op.snode_uuid, self.op.snode) = \
        ExpandNodeUuidAndName(self.cfg, None, ial.result[1])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "ADD_MODE": self.op.mode,
      }
    if self.op.mode == constants.INSTANCE_IMPORT:
      env["SRC_NODE"] = self.op.src_node
      env["SRC_PATH"] = self.op.src_path
      env["SRC_IMAGES"] = self.src_images

    env.update(BuildInstanceHookEnv(
      name=self.op.instance_name,
      primary_node_name=self.op.pnode,
      secondary_node_names=self.cfg.GetNodeNames(self.secondaries),
      status=self.op.start,
      os_type=self.op.os_type,
      minmem=self.be_full[constants.BE_MINMEM],
      maxmem=self.be_full[constants.BE_MAXMEM],
      vcpus=self.be_full[constants.BE_VCPUS],
      nics=NICListToTuple(self, self.nics),
      disk_template=self.op.disk_template,
      disks=[(d[constants.IDISK_NAME], d.get("uuid", ""),
              d[constants.IDISK_SIZE], d[constants.IDISK_MODE])
             for d in self.disks],
      bep=self.be_full,
      hvp=self.hv_full,
      hypervisor_name=self.op.hypervisor,
      tags=self.op.tags,
      ))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode(), self.op.pnode_uuid] + self.secondaries
    return nl, nl

  def _ReadExportInfo(self):
    """Reads the export information from disk.

    It will override the opcode source node and path with the actual
    information, if these two were not specified before.

    @return: the export information

    """
    assert self.op.mode == constants.INSTANCE_IMPORT

    if self.op.src_node_uuid is None:
      locked_nodes = self.owned_locks(locking.LEVEL_NODE)
      exp_list = self.rpc.call_export_list(locked_nodes)
      found = False
      for node_uuid in exp_list:
        if exp_list[node_uuid].fail_msg:
          continue
        if self.op.src_path in exp_list[node_uuid].payload:
          found = True
          self.op.src_node = self.cfg.GetNodeInfo(node_uuid).name
          self.op.src_node_uuid = node_uuid
          self.op.src_path = utils.PathJoin(pathutils.EXPORT_DIR,
                                            self.op.src_path)
          break
      if not found:
        raise errors.OpPrereqError("No export found for relative path %s" %
                                   self.op.src_path, errors.ECODE_INVAL)

    CheckNodeOnline(self, self.op.src_node_uuid)
    result = self.rpc.call_export_info(self.op.src_node_uuid, self.op.src_path)
    result.Raise("No export or invalid export found in dir %s" %
                 self.op.src_path)

    export_info = objects.SerializableConfigParser.Loads(str(result.payload))
    if not export_info.has_section(constants.INISECT_EXP):
      raise errors.ProgrammerError("Corrupted export config",
                                   errors.ECODE_ENVIRON)

    ei_version = export_info.get(constants.INISECT_EXP, "version")
    if int(ei_version) != constants.EXPORT_VERSION:
      raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                 (ei_version, constants.EXPORT_VERSION),
                                 errors.ECODE_ENVIRON)
    return export_info

  def _ReadExportParams(self, einfo):
    """Use export parameters as defaults.

    In case the opcode doesn't specify (i.e. override) some instance
    parameters, then try to use them from the export information, if
    that declares them.

    """
    self.op.os_type = einfo.get(constants.INISECT_EXP, "os")

    if not self.op.disks:
      disks = []
      # TODO: import the disk iv_name too
      for idx in range(constants.MAX_DISKS):
        if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
          disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
          disk_name = einfo.get(constants.INISECT_INS, "disk%d_name" % idx)
          disk = {
            constants.IDISK_SIZE: disk_sz,
            constants.IDISK_NAME: disk_name
            }
          disks.append(disk)
      self.op.disks = disks
      if not disks and self.op.disk_template != constants.DT_DISKLESS:
        raise errors.OpPrereqError("No disk info specified and the export"
                                   " is missing the disk information",
                                   errors.ECODE_INVAL)

    if not self.op.nics:
      nics = []
      for idx in range(constants.MAX_NICS):
        if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
          ndict = {}
          for name in [constants.INIC_IP,
                       constants.INIC_MAC, constants.INIC_NAME]:
            nic_param_name = "nic%d_%s" % (idx, name)
            if einfo.has_option(constants.INISECT_INS, nic_param_name):
              v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
              ndict[name] = v
          network = einfo.get(constants.INISECT_INS,
                              "nic%d_%s" % (idx, constants.INIC_NETWORK))
          # in case network is given link and mode are inherited
          # from nodegroup's netparams and thus should not be passed here
          if network:
            ndict[constants.INIC_NETWORK] = network
          else:
            for name in list(constants.NICS_PARAMETERS):
              v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
              ndict[name] = v
          nics.append(ndict)
        else:
          break
      self.op.nics = nics

    if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
      self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()

    if (self.op.hypervisor is None and
        einfo.has_option(constants.INISECT_INS, "hypervisor")):
      self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")

    if einfo.has_section(constants.INISECT_HYP):
      # use the export parameters but do not override the ones
      # specified by the user
      for name, value in einfo.items(constants.INISECT_HYP):
        if name not in self.op.hvparams:
          self.op.hvparams[name] = value

    if einfo.has_section(constants.INISECT_BEP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_BEP):
        if name not in self.op.beparams:
          self.op.beparams[name] = value
        # Compatibility for the old "memory" be param
        if name == constants.BE_MEMORY:
          if constants.BE_MAXMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MAXMEM] = value
          if constants.BE_MINMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MINMEM] = value
    else:
      # try to read the parameters old style, from the main section
      for name in constants.BES_PARAMETERS:
        if (name not in self.op.beparams and
            einfo.has_option(constants.INISECT_INS, name)):
          self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)

    if einfo.has_section(constants.INISECT_OSP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_OSP):
        if name not in self.op.osparams:
          self.op.osparams[name] = value

    if einfo.has_section(constants.INISECT_OSP_PRIVATE):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_OSP_PRIVATE):
        if name not in self.op.osparams_private:
          self.op.osparams_private[name] = serializer.Private(value, descr=name)

  def _RevertToDefaults(self, cluster):
    """Revert the instance parameters to the default values.

    """
    # hvparams
    hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
    for name in self.op.hvparams.keys():
      if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
        del self.op.hvparams[name]
    # beparams
    be_defs = cluster.SimpleFillBE({})
    for name in self.op.beparams.keys():
      if name in be_defs and be_defs[name] == self.op.beparams[name]:
        del self.op.beparams[name]
    # nic params
    nic_defs = cluster.SimpleFillNIC({})
    for nic in self.op.nics:
      for name in constants.NICS_PARAMETERS:
        if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
          del nic[name]
    # osparams
    os_defs = cluster.SimpleFillOS(self.op.os_type, {})
    for name in self.op.osparams.keys():
      if name in os_defs and os_defs[name] == self.op.osparams[name]:
        del self.op.osparams[name]

    os_defs_ = cluster.SimpleFillOS(self.op.os_type, {},
                                    os_params_private={})
    for name in self.op.osparams_private.keys():
      if name in os_defs_ and os_defs_[name] == self.op.osparams_private[name]:
        del self.op.osparams_private[name]

  def _CalculateFileStorageDir(self):
    """Calculate final instance file storage dir.

    """
    # file storage dir calculation/check
    self.instance_file_storage_dir = None
    if self.op.disk_template in constants.DTS_FILEBASED:
      # build the full file storage dir path
      joinargs = []

      cfg_storage = None
      if self.op.disk_template == constants.DT_FILE:
        cfg_storage = self.cfg.GetFileStorageDir()
      elif self.op.disk_template == constants.DT_SHARED_FILE:
        cfg_storage = self.cfg.GetSharedFileStorageDir()
      elif self.op.disk_template == constants.DT_GLUSTER:
        cfg_storage = self.cfg.GetGlusterStorageDir()

      if not cfg_storage:
        raise errors.OpPrereqError(
          "Cluster file storage dir for {tpl} storage type not defined".format(
            tpl=repr(self.op.disk_template)
          ),
          errors.ECODE_STATE
      )

      joinargs.append(cfg_storage)

      if self.op.file_storage_dir is not None:
        joinargs.append(self.op.file_storage_dir)

      if self.op.disk_template != constants.DT_GLUSTER:
        joinargs.append(self.op.instance_name)

      if len(joinargs) > 1:
        # pylint: disable=W0142
        self.instance_file_storage_dir = utils.PathJoin(*joinargs)
      else:
        self.instance_file_storage_dir = joinargs[0]

  def CheckPrereq(self): # pylint: disable=R0914
    """Check prerequisites.

    """
    # Check that the optimistically acquired groups are correct wrt the
    # acquired nodes
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
    cur_groups = list(self.cfg.GetNodeGroupsFromNodes(owned_nodes))
    if not owned_groups.issuperset(cur_groups):
      raise errors.OpPrereqError("New instance %s's node groups changed since"
                                 " locks were acquired, current groups are"
                                 " '%s', owning groups '%s'; retry the"
                                 " operation" %
                                 (self.op.instance_name,
                                  utils.CommaJoin(cur_groups),
                                  utils.CommaJoin(owned_groups)),
                                 errors.ECODE_STATE)

    self._CalculateFileStorageDir()
970

    
971
    if self.op.mode == constants.INSTANCE_IMPORT:
972
      export_info = self._ReadExportInfo()
973
      self._ReadExportParams(export_info)
974
      self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
975
    else:
976
      self._old_instance_name = None
977

    
978
    if (not self.cfg.GetVGName() and
979
        self.op.disk_template not in constants.DTS_NOT_LVM):
980
      raise errors.OpPrereqError("Cluster does not support lvm-based"
981
                                 " instances", errors.ECODE_STATE)
982

    
983
    if (self.op.hypervisor is None or
984
        self.op.hypervisor == constants.VALUE_AUTO):
985
      self.op.hypervisor = self.cfg.GetHypervisorType()
986

    
987
    cluster = self.cfg.GetClusterInfo()
988
    enabled_hvs = cluster.enabled_hypervisors
989
    if self.op.hypervisor not in enabled_hvs:
990
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
991
                                 " cluster (%s)" %
992
                                 (self.op.hypervisor, ",".join(enabled_hvs)),
993
                                 errors.ECODE_STATE)
994

    
995
    # Check tag validity
996
    for tag in self.op.tags:
997
      objects.TaggableObject.ValidateTag(tag)
998

    
999
    # check hypervisor parameter syntax (locally)
1000
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
1001
    filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
1002
                                      self.op.hvparams)
1003
    hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
1004
    hv_type.CheckParameterSyntax(filled_hvp)
1005
    self.hv_full = filled_hvp
1006
    # check that we don't specify global parameters on an instance
1007
    CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
1008
                         "instance", "cluster")
1009

    
1010
    # fill and remember the beparams dict
1011
    self.be_full = _ComputeFullBeParams(self.op, cluster)
1012

    
1013
    # build os parameters
1014
    if self.op.osparams_private is None:
1015
      self.op.osparams_private = serializer.PrivateDict()
1016
    if self.op.osparams_secret is None:
1017
      self.op.osparams_secret = serializer.PrivateDict()
1018

    
1019
    self.os_full = cluster.SimpleFillOS(
1020
      self.op.os_type,
1021
      self.op.osparams,
1022
      os_params_private=self.op.osparams_private,
1023
      os_params_secret=self.op.osparams_secret
1024
    )
1025

    
1026
    # now that hvp/bep are in final format, let's reset to defaults,
1027
    # if told to do so
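    # ("reset" here means dropping every value that equals the current
    # cluster default, see _RevertToDefaults, so the stored configuration
    # does not pin values that merely match the defaults)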
    if self.op.identify_defaults:
      self._RevertToDefaults(cluster)

    # NIC buildup
    self.nics = _ComputeNics(self.op, cluster, self.check_ip, self.cfg,
                             self.proc.GetECId())

    # disk checks/pre-build
    default_vg = self.cfg.GetVGName()
    self.disks = ComputeDisks(self.op, default_vg)

    if self.op.mode == constants.INSTANCE_IMPORT:
      disk_images = []
      for idx in range(len(self.disks)):
        option = "disk%d_dump" % idx
        if export_info.has_option(constants.INISECT_INS, option):
          # FIXME: are the old os-es, disk sizes, etc. useful?
          export_name = export_info.get(constants.INISECT_INS, option)
          image = utils.PathJoin(self.op.src_path, export_name)
          disk_images.append(image)
        else:
          disk_images.append(False)

      self.src_images = disk_images

      if self.op.instance_name == self._old_instance_name:
        for idx, nic in enumerate(self.nics):
          if nic.mac == constants.VALUE_AUTO:
            nic_mac_ini = "nic%d_mac" % idx
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)

    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT

    # ip ping checks (we use the same ip that was resolved in ExpandNames)
    if self.op.ip_check:
      if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (self.check_ip, self.op.instance_name),
                                   errors.ECODE_NOTUNIQUE)

    #### mac address generation
    # By generating here the mac address both the allocator and the hooks get
    # the real final mac address rather than the 'auto' or 'generate' value.
    # There is a race condition between the generation and the instance object
    # creation, which means that we know the mac is valid now, but we're not
    # sure it will be when we actually add the instance. If things go bad
    # adding the instance will abort because of a duplicate mac, and the
    # creation job will fail.
    for nic in self.nics:
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId())

    #### allocator run

    if self.op.iallocator is not None:
      self._RunAllocator()

    # Release all unneeded node locks
    keep_locks = filter(None, [self.op.pnode_uuid, self.op.snode_uuid,
                               self.op.src_node_uuid])
    ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)
    # Release all unneeded group locks
    ReleaseLocks(self, locking.LEVEL_NODEGROUP,
                 keep=self.cfg.GetNodeGroupsFromNodes(keep_locks))

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES)), \
      "Node locks differ from node resource locks"

    #### node related checks

    # check primary node
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode_uuid)
    assert self.pnode is not None, \
      "Cannot retrieve locked node %s" % self.op.pnode_uuid
    if pnode.offline:
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if pnode.drained:
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if not pnode.vm_capable:
      raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
                                 " '%s'" % pnode.name, errors.ECODE_STATE)

    self.secondaries = []

    # Fill in any IPs from IP pools. This must happen here, because we need to
    # know the nic's primary node, as specified by the iallocator
    for idx, nic in enumerate(self.nics):
      net_uuid = nic.network
      if net_uuid is not None:
        nobj = self.cfg.GetNetwork(net_uuid)
        netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.uuid)
        if netparams is None:
          raise errors.OpPrereqError("No netparams found for network"
                                     " %s. Probably not connected to"
                                     " node's %s nodegroup" %
                                     (nobj.name, self.pnode.name),
                                     errors.ECODE_INVAL)
        self.LogInfo("NIC/%d inherits netparams %s" %
                     (idx, netparams.values()))
        nic.nicparams = dict(netparams)
        if nic.ip is not None:
          if nic.ip.lower() == constants.NIC_IP_POOL:
            try:
              nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
                                         " from the address pool" % idx,
                                         errors.ECODE_STATE)
            self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name)
          else:
            try:
              self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId(),
                                 check=self.op.conflicts_check)
            except errors.ReservationError:
              raise errors.OpPrereqError("IP address %s already in use"
                                         " or does not belong to network %s" %
                                         (nic.ip, nobj.name),
                                         errors.ECODE_NOTUNIQUE)

      # net is None, ip None or given
      elif self.op.conflicts_check:
        _CheckForConflictingIp(self, nic.ip, self.pnode.uuid)

    # mirror node verification
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.snode_uuid == pnode.uuid:
        raise errors.OpPrereqError("The secondary node cannot be the"
                                   " primary node", errors.ECODE_INVAL)
      CheckNodeOnline(self, self.op.snode_uuid)
      CheckNodeNotDrained(self, self.op.snode_uuid)
      CheckNodeVmCapable(self, self.op.snode_uuid)
      self.secondaries.append(self.op.snode_uuid)

      snode = self.cfg.GetNodeInfo(self.op.snode_uuid)
      if pnode.group != snode.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    nodes = [pnode]
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      nodes.append(snode)
    has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
    excl_stor = compat.any(map(has_es, nodes))
    if excl_stor and not self.op.disk_template in constants.DTS_EXCL_STORAGE:
      raise errors.OpPrereqError("Disk template %s not supported with"
                                 " exclusive storage" % self.op.disk_template,
                                 errors.ECODE_STATE)
    for disk in self.disks:
      CheckSpindlesExclusiveStorage(disk, excl_stor, True)

    node_uuids = [pnode.uuid] + self.secondaries

    if not self.adopt_disks:
      if self.op.disk_template == constants.DT_RBD:
        # _CheckRADOSFreeSpace() is just a placeholder.
        # Any function that checks prerequisites can be placed here.
        # Check if there is enough space on the RADOS cluster.
        CheckRADOSFreeSpace()
      elif self.op.disk_template == constants.DT_EXT:
        # FIXME: Function that checks prereqs if needed
        pass
      elif self.op.disk_template in constants.DTS_LVM:
        # Check lv size requirements, if not adopting
        req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
        CheckNodesFreeDiskPerVG(self, node_uuids, req_sizes)
      else:
        # FIXME: add checks for other, non-adopting, non-lvm disk templates
        pass

    elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
      all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
                                disk[constants.IDISK_ADOPT])
                     for disk in self.disks])
      if len(all_lvs) != len(self.disks):
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
                                   errors.ECODE_INVAL)
      for lv_name in all_lvs:
        try:
          # FIXME: lv_name here is "vg/lv" need to ensure that other calls
          # to ReserveLV uses the same syntax
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("LV named %s used by another instance" %
                                     lv_name, errors.ECODE_NOTUNIQUE)

      vg_names = self.rpc.call_vg_list([pnode.uuid])[pnode.uuid]
      vg_names.Raise("Cannot get VG information from node %s" % pnode.name)

      node_lvs = self.rpc.call_lv_list([pnode.uuid],
                                       vg_names.payload.keys())[pnode.uuid]
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
      node_lvs = node_lvs.payload

      delta = all_lvs.difference(node_lvs.keys())
      if delta:
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
      if online_lvs:
        raise errors.OpPrereqError("Online logical volumes found, cannot"
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
                                   errors.ECODE_STATE)
      # update the size of disk based on what is found
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
                                        dsk[constants.IDISK_ADOPT])][0]))

    elif self.op.disk_template == constants.DT_BLOCK:
      # Normalize and de-duplicate device paths
      all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
                       for disk in self.disks])
      if len(all_disks) != len(self.disks):
        raise errors.OpPrereqError("Duplicate disk names given for adoption",
                                   errors.ECODE_INVAL)
      baddisks = [d for d in all_disks
                  if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
      if baddisks:
        raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
                                   " cannot be adopted" %
                                   (utils.CommaJoin(baddisks),
                                    constants.ADOPTABLE_BLOCKDEV_ROOT),
                                   errors.ECODE_INVAL)

      node_disks = self.rpc.call_bdev_sizes([pnode.uuid],
                                            list(all_disks))[pnode.uuid]
      node_disks.Raise("Cannot get block device information from node %s" %
                       pnode.name)
      node_disks = node_disks.payload
      delta = all_disks.difference(node_disks.keys())
      if delta:
        raise errors.OpPrereqError("Missing block device(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_disks[dsk[constants.IDISK_ADOPT]]))

1274
    # Check disk access param to be compatible with specified hypervisor
1275
    node_info = self.cfg.GetNodeInfo(self.op.pnode_uuid)
1276
    node_group = self.cfg.GetNodeGroup(node_info.group)
1277
    disk_params = self.cfg.GetGroupDiskParams(node_group)
1278
    access_type = disk_params[self.op.disk_template].get(
1279
      constants.RBD_ACCESS, constants.DISK_KERNELSPACE
1280
    )
1281

    
1282
    if not IsValidDiskAccessModeCombination(self.op.hypervisor,
1283
                                            self.op.disk_template,
1284
                                            access_type):
1285
      raise errors.OpPrereqError("Selected hypervisor (%s) cannot be"
1286
                                 " used with %s disk access param" %
1287
                                 (self.op.hypervisor, access_type),
1288
                                  errors.ECODE_STATE)
1289

    
1290
    # Verify instance specs
1291
    spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
1292
    ispec = {
1293
      constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
1294
      constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
1295
      constants.ISPEC_DISK_COUNT: len(self.disks),
1296
      constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE]
1297
                                  for disk in self.disks],
1298
      constants.ISPEC_NIC_COUNT: len(self.nics),
1299
      constants.ISPEC_SPINDLE_USE: spindle_use,
1300
      }
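    # Illustrative example (hypothetical values): for a 2-disk, 1-NIC instance
    # with 1024 MiB of memory and 2 VCPUs the resulting ispec would be roughly
    #   {constants.ISPEC_MEM_SIZE: 1024, constants.ISPEC_CPU_COUNT: 2,
    #    constants.ISPEC_DISK_COUNT: 2,
    #    constants.ISPEC_DISK_SIZE: [2048, 512],
    #    constants.ISPEC_NIC_COUNT: 1, constants.ISPEC_SPINDLE_USE: 1}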
1301

    
1302
    group_info = self.cfg.GetNodeGroup(pnode.group)
1303
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
1304
    res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
1305
                                               self.op.disk_template)
1306
    if not self.op.ignore_ipolicy and res:
1307
      msg = ("Instance allocation to group %s (%s) violates policy: %s" %
1308
             (pnode.group, group_info.name, utils.CommaJoin(res)))
1309
      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
1310

    
1311
    CheckHVParams(self, node_uuids, self.op.hypervisor, self.op.hvparams)
1312

    
1313
    if self.op.os_type is not None:
1314
      CheckNodeHasOS(self, pnode.uuid, self.op.os_type, self.op.force_variant)
1315

    
1316
    # check OS parameters (remotely)
1317
    CheckOSParams(self, True, node_uuids, self.op.os_type, self.os_full)
1318

    
1319
    CheckNicsBridgesExist(self, self.nics, self.pnode.uuid)
1320

    
1321
    #TODO: _CheckExtParams (remotely)
1322
    # Check parameters for extstorage
1323

    
1324
    # memory check on primary node
1325
    #TODO(dynmem): use MINMEM for checking
1326
    if self.op.start:
1327
      hvfull = objects.FillDict(cluster.hvparams.get(self.op.hypervisor, {}),
1328
                                self.op.hvparams)
1329
      CheckNodeFreeMemory(self, self.pnode.uuid,
1330
                          "creating instance %s" % self.op.instance_name,
1331
                          self.be_full[constants.BE_MAXMEM],
1332
                          self.op.hypervisor, hvfull)
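      # Note (illustrative; assumes FillDict's defaults-then-overrides merge):
      # the opcode hvparams are overlaid on the cluster defaults, e.g.
      #   objects.FillDict({"kernel_path": "/vmlinuz"},
      #                    {"root_path": "/dev/vda1"})
      #   # -> {"kernel_path": "/vmlinuz", "root_path": "/dev/vda1"}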
1333

    
1334
    self.dry_run_result = list(node_uuids)
1335

    
1336
  def _RemoveDegradedDisks(self, feedback_fn, disk_abort, instance):
1337
    """Removes degraded disks and instance.
1338

1339
    It optionally checks whether disks are degraded.  If the disks are
1340
    degraded, they are removed and the instance is also removed from
1341
    the configuration.
1342

1343
    If L{disk_abort} is True, then the disks are considered degraded
1344
    and removed, and the instance is removed from the configuration.
1345

1346
    If L{disk_abort} is False, then it first checks whether disks are
1347
    degraded and, if so, it removes the disks and the instance is
1348
    removed from the configuration.
1349

1350
    @type feedback_fn: callable
1351
    @param feedback_fn: function used to send feedback back to the caller
1352

1353
    @type disk_abort: boolean
1354
    @param disk_abort:
1355
      True if disks are degraded, False to first check if disks are
1356
      degraded
1357

1358
    @type instance: L{objects.Instance}
1359
    @param instance: instance containing the disks to check
1360

1361
    @rtype: NoneType
1362
    @return: None
1363
    @raise errors.OpExecError: if disks are degraded
1364

1365
    """
1366
    if disk_abort:
1367
      pass
1368
    elif self.op.wait_for_sync:
1369
      disk_abort = not WaitForSync(self, instance)
1370
    elif instance.disk_template in constants.DTS_INT_MIRROR:
1371
      # make sure the disks are not degraded (still sync-ing is ok)
1372
      feedback_fn("* checking mirrors status")
1373
      disk_abort = not WaitForSync(self, instance, oneshot=True)
1374
    else:
1375
      disk_abort = False
1376

    
1377
    if disk_abort:
1378
      RemoveDisks(self, instance)
1379
      self.cfg.RemoveInstance(instance.uuid)
1380
      # Make sure the instance lock gets removed
1381
      self.remove_locks[locking.LEVEL_INSTANCE] = instance.name
1382
      raise errors.OpExecError("There are some degraded disks for"
1383
                               " this instance")
1384

    
1385
  def Exec(self, feedback_fn):
1386
    """Create and add the instance to the cluster.
1387

1388
    """
1389
    assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
1390
                self.owned_locks(locking.LEVEL_NODE)), \
1391
      "Node locks differ from node resource locks"
1392

    
1393
    ht_kind = self.op.hypervisor
1394
    if ht_kind in constants.HTS_REQ_PORT:
1395
      network_port = self.cfg.AllocatePort()
1396
    else:
1397
      network_port = None
1398

    
1399
    instance_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
1400

    
1401
    # This is ugly but we got a chicken-egg problem here
1402
    # We can only take the group disk parameters, as the instance
1403
    # has no disks yet (we are generating them right here).
1404
    nodegroup = self.cfg.GetNodeGroup(self.pnode.group)
1405
    disks = GenerateDiskTemplate(self,
1406
                                 self.op.disk_template,
1407
                                 instance_uuid, self.pnode.uuid,
1408
                                 self.secondaries,
1409
                                 self.disks,
1410
                                 self.instance_file_storage_dir,
1411
                                 self.op.file_driver,
1412
                                 0,
1413
                                 feedback_fn,
1414
                                 self.cfg.GetGroupDiskParams(nodegroup))
1415

    
1416
    if self.op.os_type is None:
1417
      os_type = ""
1418
    else:
1419
      os_type = self.op.os_type
1420

    
1421
    iobj = objects.Instance(name=self.op.instance_name,
1422
                            uuid=instance_uuid,
1423
                            os=os_type,
1424
                            primary_node=self.pnode.uuid,
1425
                            nics=self.nics, disks=disks,
1426
                            disk_template=self.op.disk_template,
1427
                            disks_active=False,
1428
                            admin_state=constants.ADMINST_DOWN,
1429
                            network_port=network_port,
1430
                            beparams=self.op.beparams,
1431
                            hvparams=self.op.hvparams,
1432
                            hypervisor=self.op.hypervisor,
1433
                            osparams=self.op.osparams,
1434
                            osparams_private=self.op.osparams_private,
1435
                            )
1436

    
1437
    if self.op.tags:
1438
      for tag in self.op.tags:
1439
        iobj.AddTag(tag)
1440

    
1441
    if self.adopt_disks:
1442
      if self.op.disk_template == constants.DT_PLAIN:
1443
        # rename LVs to the newly-generated names; we need to construct
1444
        # 'fake' LV disks with the old data, plus the new unique_id
1445
        tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
1446
        rename_to = []
1447
        for t_dsk, a_dsk in zip(tmp_disks, self.disks):
1448
          rename_to.append(t_dsk.logical_id)
1449
          t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
1450
        result = self.rpc.call_blockdev_rename(self.pnode.uuid,
1451
                                               zip(tmp_disks, rename_to))
1452
        result.Raise("Failed to rename adoped LVs")
1453
    else:
1454
      feedback_fn("* creating instance disks...")
1455
      try:
1456
        CreateDisks(self, iobj)
1457
      except errors.OpExecError:
1458
        self.LogWarning("Device creation failed")
1459
        self.cfg.ReleaseDRBDMinors(self.op.instance_name)
1460
        raise
1461

    
1462
    feedback_fn("adding instance %s to cluster config" % self.op.instance_name)
1463

    
1464
    self.cfg.AddInstance(iobj, self.proc.GetECId())
1465

    
1466
    # Declare that we don't want to remove the instance lock anymore, as we've
1467
    # added the instance to the config
1468
    del self.remove_locks[locking.LEVEL_INSTANCE]
1469

    
1470
    if self.op.mode == constants.INSTANCE_IMPORT:
1471
      # Release unused nodes
1472
      ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node_uuid])
1473
    else:
1474
      # Release all nodes
1475
      ReleaseLocks(self, locking.LEVEL_NODE)
1476

    
1477
    # Wipe disks
1478
    disk_abort = False
1479
    if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
1480
      feedback_fn("* wiping instance disks...")
1481
      try:
1482
        WipeDisks(self, iobj)
1483
      except errors.OpExecError, err:
1484
        logging.exception("Wiping disks failed")
1485
        self.LogWarning("Wiping instance disks failed (%s)", err)
1486
        disk_abort = True
1487

    
1488
    self._RemoveDegradedDisks(feedback_fn, disk_abort, iobj)
1489

    
1490
    # Image disks
1491
    os_image = objects.GetOSImage(iobj.osparams)
1492
    disk_abort = False
1493

    
1494
    if not self.adopt_disks and os_image is not None:
1495
      master = self.cfg.GetMasterNode()
1496

    
1497
      if not utils.IsUrl(os_image) and master != self.pnode.uuid:
1498
        ssh_port = self.pnode.ndparams.get(constants.ND_SSH_PORT)
1499
        srun = ssh.SshRunner(self.cfg.GetClusterName())
1500
        srun.CopyFileToNode(self.pnode.name, ssh_port, os_image)
1501

    
1502
      feedback_fn("* imaging instance disks...")
1503
      try:
1504
        ImageDisks(self, iobj, os_image)
1505
      except errors.OpExecError, err:
1506
        logging.exception("Imaging disks failed")
1507
        self.LogWarning("Imaging instance disks failed (%s)", err)
1508
        disk_abort = True
1509

    
1510
    self._RemoveDegradedDisks(feedback_fn, disk_abort, iobj)
1511

    
1512
    # instance disks are now active
1513
    iobj.disks_active = True
1514

    
1515
    # Release all node resource locks
1516
    ReleaseLocks(self, locking.LEVEL_NODE_RES)
1517

    
1518
    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
1519
      if self.op.mode == constants.INSTANCE_CREATE:
1520
        os_image = objects.GetOSImage(self.op.osparams)
1521

    
1522
        if os_image is None and not self.op.no_install:
1523
          pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
1524
                        not self.op.wait_for_sync)
1525
          if pause_sync:
1526
            feedback_fn("* pausing disk sync to install instance OS")
1527
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
1528
                                                              (iobj.disks,
1529
                                                               iobj), True)
1530
            for idx, success in enumerate(result.payload):
1531
              if not success:
1532
                logging.warn("pause-sync of instance %s for disk %d failed",
1533
                             self.op.instance_name, idx)
1534

    
1535
          feedback_fn("* running the instance OS create scripts...")
1536
          # FIXME: pass debug option from opcode to backend
1537
          os_add_result = \
1538
            self.rpc.call_instance_os_add(self.pnode.uuid,
1539
                                          (iobj, self.op.osparams_secret),
1540
                                          False,
1541
                                          self.op.debug_level)
1542
          if pause_sync:
1543
            feedback_fn("* resuming disk sync")
1544
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
1545
                                                              (iobj.disks,
1546
                                                               iobj), False)
1547
            for idx, success in enumerate(result.payload):
1548
              if not success:
1549
                logging.warn("resume-sync of instance %s for disk %d failed",
1550
                             self.op.instance_name, idx)
1551

    
1552
          os_add_result.Raise("Could not add os for instance %s"
1553
                              " on node %s" % (self.op.instance_name,
1554
                                               self.pnode.name))
1555

    
1556
      else:
1557
        if self.op.mode == constants.INSTANCE_IMPORT:
1558
          feedback_fn("* running the instance OS import scripts...")
1559

    
1560
          transfers = []
1561

    
1562
          for idx, image in enumerate(self.src_images):
1563
            if not image:
1564
              continue
1565

    
1566
            # FIXME: pass debug option from opcode to backend
1567
            dt = masterd.instance.DiskTransfer("disk/%s" % idx,
1568
                                               constants.IEIO_FILE, (image, ),
1569
                                               constants.IEIO_SCRIPT,
1570
                                               ((iobj.disks[idx], iobj), idx),
1571
                                               None)
1572
            transfers.append(dt)
1573

    
1574
          import_result = \
1575
            masterd.instance.TransferInstanceData(self, feedback_fn,
1576
                                                  self.op.src_node_uuid,
1577
                                                  self.pnode.uuid,
1578
                                                  self.pnode.secondary_ip,
1579
                                                  self.op.compress,
1580
                                                  iobj, transfers)
1581
          if not compat.all(import_result):
1582
            self.LogWarning("Some disks for instance %s on node %s were not"
1583
                            " imported successfully" % (self.op.instance_name,
1584
                                                        self.pnode.name))
1585

    
1586
          rename_from = self._old_instance_name
1587

    
1588
        elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
1589
          feedback_fn("* preparing remote import...")
1590
          # The source cluster will stop the instance before attempting to make
1591
          # a connection. In some cases stopping an instance can take a long
1592
          # time, hence the shutdown timeout is added to the connection
1593
          # timeout.
1594
          connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
1595
                             self.op.source_shutdown_timeout)
1596
          timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
1597

    
1598
          assert iobj.primary_node == self.pnode.uuid
1599
          disk_results = \
1600
            masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
1601
                                          self.source_x509_ca,
1602
                                          self._cds, self.op.compress, timeouts)
1603
          if not compat.all(disk_results):
1604
            # TODO: Should the instance still be started, even if some disks
1605
            # failed to import (valid for local imports, too)?
1606
            self.LogWarning("Some disks for instance %s on node %s were not"
1607
                            " imported successfully" % (self.op.instance_name,
1608
                                                        self.pnode.name))
1609

    
1610
          rename_from = self.source_instance_name
1611

    
1612
        else:
1613
          # also checked in the prereq part
1614
          raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
1615
                                       % self.op.mode)
1616

    
1617
        # Run rename script on newly imported instance
1618
        assert iobj.name == self.op.instance_name
1619
        feedback_fn("Running rename script for %s" % self.op.instance_name)
1620
        result = self.rpc.call_instance_run_rename(self.pnode.uuid, iobj,
1621
                                                   rename_from,
1622
                                                   self.op.debug_level)
1623
        result.Warn("Failed to run rename script for %s on node %s" %
1624
                    (self.op.instance_name, self.pnode.name), self.LogWarning)
1625

    
1626
    assert not self.owned_locks(locking.LEVEL_NODE_RES)
1627

    
1628
    if self.op.start:
1629
      iobj.admin_state = constants.ADMINST_UP
1630
      self.cfg.Update(iobj, feedback_fn)
1631
      logging.info("Starting instance %s on node %s", self.op.instance_name,
1632
                   self.pnode.name)
1633
      feedback_fn("* starting instance...")
1634
      result = self.rpc.call_instance_start(self.pnode.uuid, (iobj, None, None),
1635
                                            False, self.op.reason)
1636
      result.Raise("Could not start instance")
1637

    
1638
    return self.cfg.GetNodeNames(list(iobj.all_nodes))
1639

    
1640

    
1641
class LUInstanceRename(LogicalUnit):
1642
  """Rename an instance.
1643

1644
  """
1645
  HPATH = "instance-rename"
1646
  HTYPE = constants.HTYPE_INSTANCE
1647

    
1648
  def CheckArguments(self):
1649
    """Check arguments.
1650

1651
    """
1652
    if self.op.ip_check and not self.op.name_check:
1653
      # TODO: make the ip check more flexible and not depend on the name check
1654
      raise errors.OpPrereqError("IP address check requires a name check",
1655
                                 errors.ECODE_INVAL)
1656

    
1657
  def BuildHooksEnv(self):
1658
    """Build hooks env.
1659

1660
    This runs on master, primary and secondary nodes of the instance.
1661

1662
    """
1663
    env = BuildInstanceHookEnvByObject(self, self.instance)
1664
    env["INSTANCE_NEW_NAME"] = self.op.new_name
1665
    return env
1666

    
1667
  def BuildHooksNodes(self):
1668
    """Build hooks nodes.
1669

1670
    """
1671
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
1672
    return (nl, nl)
1673

    
1674
  def CheckPrereq(self):
1675
    """Check prerequisites.
1676

1677
    This checks that the instance is in the cluster and is not running.
1678

1679
    """
1680
    (self.op.instance_uuid, self.op.instance_name) = \
1681
      ExpandInstanceUuidAndName(self.cfg, self.op.instance_uuid,
1682
                                self.op.instance_name)
1683
    instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1684
    assert instance is not None
1685

    
1686
    # It should actually not happen that an instance is running with a disabled
1687
    # disk template, but in case it does, the renaming of file-based instances
1688
    # will fail horribly. Thus, we test it before.
1689
    if (instance.disk_template in constants.DTS_FILEBASED and
1690
        self.op.new_name != instance.name):
1691
      CheckDiskTemplateEnabled(self.cfg.GetClusterInfo(),
1692
                               instance.disk_template)
1693

    
1694
    CheckNodeOnline(self, instance.primary_node)
1695
    CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
1696
                       msg="cannot rename")
1697
    self.instance = instance
1698

    
1699
    new_name = self.op.new_name
1700
    if self.op.name_check:
1701
      hostname = _CheckHostnameSane(self, new_name)
1702
      new_name = self.op.new_name = hostname.name
1703
      if (self.op.ip_check and
1704
          netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
1705
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
1706
                                   (hostname.ip, new_name),
1707
                                   errors.ECODE_NOTUNIQUE)
1708

    
1709
    instance_names = [inst.name for
1710
                      inst in self.cfg.GetAllInstancesInfo().values()]
1711
    if new_name in instance_names and new_name != instance.name:
1712
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
1713
                                 new_name, errors.ECODE_EXISTS)
1714

    
1715
  def Exec(self, feedback_fn):
1716
    """Rename the instance.
1717

1718
    """
1719
    old_name = self.instance.name
1720

    
1721
    rename_file_storage = False
1722
    if (self.instance.disk_template in (constants.DT_FILE,
1723
                                        constants.DT_SHARED_FILE) and
1724
        self.op.new_name != self.instance.name):
1725
      old_file_storage_dir = os.path.dirname(
1726
                               self.instance.disks[0].logical_id[1])
1727
      rename_file_storage = True
1728

    
1729
    self.cfg.RenameInstance(self.instance.uuid, self.op.new_name)
1730
    # Change the instance lock. This is definitely safe while we hold the BGL.
1731
    # Otherwise the new lock would have to be added in acquired mode.
1732
    assert self.REQ_BGL
1733
    assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER)
1734

    
1735
    # re-read the instance from the configuration after rename
1736
    renamed_inst = self.cfg.GetInstanceInfo(self.instance.uuid)
1737

    
1738
    if rename_file_storage:
1739
      new_file_storage_dir = os.path.dirname(
1740
                               renamed_inst.disks[0].logical_id[1])
1741
      result = self.rpc.call_file_storage_dir_rename(renamed_inst.primary_node,
1742
                                                     old_file_storage_dir,
1743
                                                     new_file_storage_dir)
1744
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
1745
                   " (but the instance has been renamed in Ganeti)" %
1746
                   (self.cfg.GetNodeName(renamed_inst.primary_node),
1747
                    old_file_storage_dir, new_file_storage_dir))
1748

    
1749
    StartInstanceDisks(self, renamed_inst, None)
1750
    # update info on disks
1751
    info = GetInstanceInfoText(renamed_inst)
1752
    for (idx, disk) in enumerate(renamed_inst.disks):
1753
      for node_uuid in renamed_inst.all_nodes:
1754
        result = self.rpc.call_blockdev_setinfo(node_uuid,
1755
                                                (disk, renamed_inst), info)
1756
        result.Warn("Error setting info on node %s for disk %s" %
1757
                    (self.cfg.GetNodeName(node_uuid), idx), self.LogWarning)
1758
    try:
1759
      result = self.rpc.call_instance_run_rename(renamed_inst.primary_node,
1760
                                                 renamed_inst, old_name,
1761
                                                 self.op.debug_level)
1762
      result.Warn("Could not run OS rename script for instance %s on node %s"
1763
                  " (but the instance has been renamed in Ganeti)" %
1764
                  (renamed_inst.name,
1765
                   self.cfg.GetNodeName(renamed_inst.primary_node)),
1766
                  self.LogWarning)
1767
    finally:
1768
      ShutdownInstanceDisks(self, renamed_inst)
1769

    
1770
    return renamed_inst.name
1771

    
1772

    
1773
class LUInstanceRemove(LogicalUnit):
1774
  """Remove an instance.
1775

1776
  """
1777
  HPATH = "instance-remove"
1778
  HTYPE = constants.HTYPE_INSTANCE
1779
  REQ_BGL = False
1780

    
1781
  def ExpandNames(self):
1782
    self._ExpandAndLockInstance()
1783
    self.needed_locks[locking.LEVEL_NODE] = []
1784
    self.needed_locks[locking.LEVEL_NODE_RES] = []
1785
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
1786

    
1787
  def DeclareLocks(self, level):
1788
    if level == locking.LEVEL_NODE:
1789
      self._LockInstancesNodes()
1790
    elif level == locking.LEVEL_NODE_RES:
1791
      # Copy node locks
1792
      self.needed_locks[locking.LEVEL_NODE_RES] = \
1793
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1794

    
1795
  def BuildHooksEnv(self):
1796
    """Build hooks env.
1797

1798
    This runs on master, primary and secondary nodes of the instance.
1799

1800
    """
1801
    env = BuildInstanceHookEnvByObject(self, self.instance)
1802
    env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
1803
    return env
1804

    
1805
  def BuildHooksNodes(self):
1806
    """Build hooks nodes.
1807

1808
    """
1809
    nl = [self.cfg.GetMasterNode()]
1810
    nl_post = list(self.instance.all_nodes) + nl
1811
    return (nl, nl_post)
1812

    
1813
  def CheckPrereq(self):
1814
    """Check prerequisites.
1815

1816
    This checks that the instance is in the cluster.
1817

1818
    """
1819
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1820
    assert self.instance is not None, \
1821
      "Cannot retrieve locked instance %s" % self.op.instance_name
1822

    
1823
  def Exec(self, feedback_fn):
1824
    """Remove the instance.
1825

1826
    """
1827
    logging.info("Shutting down instance %s on node %s", self.instance.name,
1828
                 self.cfg.GetNodeName(self.instance.primary_node))
1829

    
1830
    result = self.rpc.call_instance_shutdown(self.instance.primary_node,
1831
                                             self.instance,
1832
                                             self.op.shutdown_timeout,
1833
                                             self.op.reason)
1834
    if self.op.ignore_failures:
1835
      result.Warn("Warning: can't shutdown instance", feedback_fn)
1836
    else:
1837
      result.Raise("Could not shutdown instance %s on node %s" %
1838
                   (self.instance.name,
1839
                    self.cfg.GetNodeName(self.instance.primary_node)))
1840

    
1841
    assert (self.owned_locks(locking.LEVEL_NODE) ==
1842
            self.owned_locks(locking.LEVEL_NODE_RES))
1843
    assert not (set(self.instance.all_nodes) -
1844
                self.owned_locks(locking.LEVEL_NODE)), \
1845
      "Not owning correct locks"
1846

    
1847
    RemoveInstance(self, feedback_fn, self.instance, self.op.ignore_failures)
1848

    
1849

    
1850
class LUInstanceMove(LogicalUnit):
1851
  """Move an instance by data-copying.
1852

1853
  """
1854
  HPATH = "instance-move"
1855
  HTYPE = constants.HTYPE_INSTANCE
1856
  REQ_BGL = False
1857

    
1858
  def ExpandNames(self):
1859
    self._ExpandAndLockInstance()
1860
    (self.op.target_node_uuid, self.op.target_node) = \
1861
      ExpandNodeUuidAndName(self.cfg, self.op.target_node_uuid,
1862
                            self.op.target_node)
1863
    self.needed_locks[locking.LEVEL_NODE] = [self.op.target_node_uuid]
1864
    self.needed_locks[locking.LEVEL_NODE_RES] = []
1865
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
1866

    
1867
  def DeclareLocks(self, level):
1868
    if level == locking.LEVEL_NODE:
1869
      self._LockInstancesNodes(primary_only=True)
1870
    elif level == locking.LEVEL_NODE_RES:
1871
      # Copy node locks
1872
      self.needed_locks[locking.LEVEL_NODE_RES] = \
1873
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1874

    
1875
  def BuildHooksEnv(self):
1876
    """Build hooks env.
1877

1878
    This runs on master, primary and target nodes of the instance.
1879

1880
    """
1881
    env = {
1882
      "TARGET_NODE": self.op.target_node,
1883
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
1884
      }
1885
    env.update(BuildInstanceHookEnvByObject(self, self.instance))
1886
    return env
1887

    
1888
  def BuildHooksNodes(self):
1889
    """Build hooks nodes.
1890

1891
    """
1892
    nl = [
1893
      self.cfg.GetMasterNode(),
1894
      self.instance.primary_node,
1895
      self.op.target_node_uuid,
1896
      ]
1897
    return (nl, nl)
1898

    
1899
  def CheckPrereq(self):
1900
    """Check prerequisites.
1901

1902
    This checks that the instance is in the cluster.
1903

1904
    """
1905
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1906
    assert self.instance is not None, \
1907
      "Cannot retrieve locked instance %s" % self.op.instance_name
1908

    
1909
    if self.instance.disk_template not in constants.DTS_COPYABLE:
1910
      raise errors.OpPrereqError("Disk template %s not suitable for copying" %
1911
                                 self.instance.disk_template,
1912
                                 errors.ECODE_STATE)
1913

    
1914
    target_node = self.cfg.GetNodeInfo(self.op.target_node_uuid)
1915
    assert target_node is not None, \
1916
      "Cannot retrieve locked node %s" % self.op.target_node
1917

    
1918
    self.target_node_uuid = target_node.uuid
1919
    if target_node.uuid == self.instance.primary_node:
1920
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
1921
                                 (self.instance.name, target_node.name),
1922
                                 errors.ECODE_STATE)
1923

    
1924
    cluster = self.cfg.GetClusterInfo()
1925
    bep = cluster.FillBE(self.instance)
1926

    
1927
    for idx, dsk in enumerate(self.instance.disks):
1928
      if dsk.dev_type not in (constants.DT_PLAIN, constants.DT_FILE,
1929
                              constants.DT_SHARED_FILE, constants.DT_GLUSTER):
1930
        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
1931
                                   " cannot copy" % idx, errors.ECODE_STATE)
1932

    
1933
    CheckNodeOnline(self, target_node.uuid)
1934
    CheckNodeNotDrained(self, target_node.uuid)
1935
    CheckNodeVmCapable(self, target_node.uuid)
1936
    group_info = self.cfg.GetNodeGroup(target_node.group)
1937
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
1938
    CheckTargetNodeIPolicy(self, ipolicy, self.instance, target_node, self.cfg,
1939
                           ignore=self.op.ignore_ipolicy)
1940

    
1941
    if self.instance.admin_state == constants.ADMINST_UP:
1942
      # check memory requirements on the target node
1943
      CheckNodeFreeMemory(
1944
          self, target_node.uuid, "failing over instance %s" %
1945
          self.instance.name, bep[constants.BE_MAXMEM],
1946
          self.instance.hypervisor,
1947
          cluster.hvparams[self.instance.hypervisor])
1948
    else:
1949
      self.LogInfo("Not checking memory on the secondary node as"
1950
                   " instance will not be started")
1951

    
1952
    # check bridge existence
1953
    CheckInstanceBridgesExist(self, self.instance, node_uuid=target_node.uuid)
1954

    
1955
  def Exec(self, feedback_fn):
1956
    """Move an instance.
1957

1958
    The move is done by shutting it down on its present node, copying
1959
    the data over (slow) and starting it on the new node.
1960

1961
    """
1962
    source_node = self.cfg.GetNodeInfo(self.instance.primary_node)
1963
    target_node = self.cfg.GetNodeInfo(self.target_node_uuid)
1964

    
1965
    self.LogInfo("Shutting down instance %s on source node %s",
1966
                 self.instance.name, source_node.name)
1967

    
1968
    assert (self.owned_locks(locking.LEVEL_NODE) ==
1969
            self.owned_locks(locking.LEVEL_NODE_RES))
1970

    
1971
    result = self.rpc.call_instance_shutdown(source_node.uuid, self.instance,
1972
                                             self.op.shutdown_timeout,
1973
                                             self.op.reason)
1974
    if self.op.ignore_consistency:
1975
      result.Warn("Could not shutdown instance %s on node %s. Proceeding"
1976
                  " anyway. Please make sure node %s is down. Error details" %
1977
                  (self.instance.name, source_node.name, source_node.name),
1978
                  self.LogWarning)
1979
    else:
1980
      result.Raise("Could not shutdown instance %s on node %s" %
1981
                   (self.instance.name, source_node.name))
1982

    
1983
    # create the target disks
1984
    try:
1985
      CreateDisks(self, self.instance, target_node_uuid=target_node.uuid)
1986
    except errors.OpExecError:
1987
      self.LogWarning("Device creation failed")
1988
      self.cfg.ReleaseDRBDMinors(self.instance.uuid)
1989
      raise
1990

    
1991
    errs = []
1992
    transfers = []
1993
    # activate, get path, create transfer jobs
1994
    for idx, disk in enumerate(self.instance.disks):
1995
      # FIXME: pass debug option from opcode to backend
1996
      dt = masterd.instance.DiskTransfer("disk/%s" % idx,
1997
                                         constants.IEIO_RAW_DISK,
1998
                                         (disk, self.instance),
1999
                                         constants.IEIO_RAW_DISK,
2000
                                         (disk, self.instance),
2001
                                         None)
2002
      transfers.append(dt)
2003

    
2004
    import_result = \
2005
      masterd.instance.TransferInstanceData(self, feedback_fn,
2006
                                            source_node.uuid,
2007
                                            target_node.uuid,
2008
                                            target_node.secondary_ip,
2009
                                            self.op.compress,
2010
                                            self.instance, transfers)
2011
    if not compat.all(import_result):
2012
      errs.append("Failed to transfer instance data")
2013

    
2014
    if errs:
2015
      self.LogWarning("Some disks failed to copy, aborting")
2016
      try:
2017
        RemoveDisks(self, self.instance, target_node_uuid=target_node.uuid)
2018
      finally:
2019
        self.cfg.ReleaseDRBDMinors(self.instance.uuid)
2020
        raise errors.OpExecError("Errors during disk copy: %s" %
2021
                                 (",".join(errs),))
2022

    
2023
    self.instance.primary_node = target_node.uuid
2024
    self.cfg.Update(self.instance, feedback_fn)
2025

    
2026
    self.LogInfo("Removing the disks on the original node")
2027
    RemoveDisks(self, self.instance, target_node_uuid=source_node.uuid)
2028

    
2029
    # Only start the instance if it's marked as up
2030
    if self.instance.admin_state == constants.ADMINST_UP:
2031
      self.LogInfo("Starting instance %s on node %s",
2032
                   self.instance.name, target_node.name)
2033

    
2034
      disks_ok, _ = AssembleInstanceDisks(self, self.instance,
2035
                                          ignore_secondaries=True)
2036
      if not disks_ok:
2037
        ShutdownInstanceDisks(self, self.instance)
2038
        raise errors.OpExecError("Can't activate the instance's disks")
2039

    
2040
      result = self.rpc.call_instance_start(target_node.uuid,
2041
                                            (self.instance, None, None), False,
2042
                                            self.op.reason)
2043
      msg = result.fail_msg
2044
      if msg:
2045
        ShutdownInstanceDisks(self, self.instance)
2046
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
2047
                                 (self.instance.name, target_node.name, msg))
2048

    
2049

    
2050
class LUInstanceMultiAlloc(NoHooksLU):
2051
  """Allocates multiple instances at the same time.
2052

2053
  """
2054
  REQ_BGL = False
2055

    
2056
  def CheckArguments(self):
2057
    """Check arguments.
2058

2059
    """
2060
    nodes = []
2061
    for inst in self.op.instances:
2062
      if inst.iallocator is not None:
2063
        raise errors.OpPrereqError("iallocator are not allowed to be set on"
2064
                                   " instance objects", errors.ECODE_INVAL)
2065
      nodes.append(bool(inst.pnode))
2066
      if inst.disk_template in constants.DTS_INT_MIRROR:
2067
        nodes.append(bool(inst.snode))
2068

    
2069
    has_nodes = compat.any(nodes)
2070
    if compat.all(nodes) ^ has_nodes:
2071
      raise errors.OpPrereqError("There are instance objects providing"
2072
                                 " pnode/snode while others do not",
2073
                                 errors.ECODE_INVAL)
2074

    
2075
    if not has_nodes and self.op.iallocator is None:
2076
      default_iallocator = self.cfg.GetDefaultIAllocator()
2077
      if default_iallocator:
2078
        self.op.iallocator = default_iallocator
2079
      else:
2080
        raise errors.OpPrereqError("No iallocator or nodes on the instances"
2081
                                   " given and no cluster-wide default"
2082
                                   " iallocator found; please specify either"
2083
                                   " an iallocator or nodes on the instances"
2084
                                   " or set a cluster-wide default iallocator",
2085
                                   errors.ECODE_INVAL)
2086

    
2087
    _CheckOpportunisticLocking(self.op)
2088

    
2089
    dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
2090
    if dups:
2091
      raise errors.OpPrereqError("There are duplicate instance names: %s" %
2092
                                 utils.CommaJoin(dups), errors.ECODE_INVAL)
2093

    
2094
  def ExpandNames(self):
2095
    """Calculate the locks.
2096

2097
    """
2098
    self.share_locks = ShareAll()
2099
    self.needed_locks = {
2100
      # iallocator will select nodes and even if no iallocator is used,
2101
      # collisions with LUInstanceCreate should be avoided
2102
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
2103
      }
2104

    
2105
    if self.op.iallocator:
2106
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
2107
      self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET
2108

    
2109
      if self.op.opportunistic_locking:
2110
        self.opportunistic_locks[locking.LEVEL_NODE] = True
2111
    else:
2112
      nodeslist = []
2113
      for inst in self.op.instances:
2114
        (inst.pnode_uuid, inst.pnode) = \
2115
          ExpandNodeUuidAndName(self.cfg, inst.pnode_uuid, inst.pnode)
2116
        nodeslist.append(inst.pnode_uuid)
2117
        if inst.snode is not None:
2118
          (inst.snode_uuid, inst.snode) = \
2119
            ExpandNodeUuidAndName(self.cfg, inst.snode_uuid, inst.snode)
2120
          nodeslist.append(inst.snode_uuid)
2121

    
2122
      self.needed_locks[locking.LEVEL_NODE] = nodeslist
2123
      # Lock resources of instance's primary and secondary nodes (copy to
2124
      # prevent accidental modification)
2125
      self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)
2126

    
2127
  def DeclareLocks(self, level):
2128
    if level == locking.LEVEL_NODE_RES and \
2129
      self.opportunistic_locks[locking.LEVEL_NODE]:
2130
      # Even when using opportunistic locking, we require the same set of
2131
      # NODE_RES locks as we got NODE locks
2132
      self.needed_locks[locking.LEVEL_NODE_RES] = \
2133
        self.owned_locks(locking.LEVEL_NODE)
2134

    
2135
  def CheckPrereq(self):
2136
    """Check prerequisite.
2137

2138
    """
2139
    if self.op.iallocator:
2140
      cluster = self.cfg.GetClusterInfo()
2141
      default_vg = self.cfg.GetVGName()
2142
      ec_id = self.proc.GetECId()
2143

    
2144
      if self.op.opportunistic_locking:
2145
        # Only consider nodes for which a lock is held
2146
        node_whitelist = self.cfg.GetNodeNames(
2147
                           list(self.owned_locks(locking.LEVEL_NODE)))
2148
      else:
2149
        node_whitelist = None
2150

    
2151
      insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
2152
                                           _ComputeNics(op, cluster, None,
2153
                                                        self.cfg, ec_id),
2154
                                           _ComputeFullBeParams(op, cluster),
2155
                                           node_whitelist)
2156
               for op in self.op.instances]
2157

    
2158
      req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
2159
      ial = iallocator.IAllocator(self.cfg, self.rpc, req)
2160

    
2161
      ial.Run(self.op.iallocator)
2162

    
2163
      if not ial.success:
2164
        raise errors.OpPrereqError("Can't compute nodes using"
2165
                                   " iallocator '%s': %s" %
2166
                                   (self.op.iallocator, ial.info),
2167
                                   errors.ECODE_NORES)
2168

    
2169
      self.ia_result = ial.result
2170

    
2171
    if self.op.dry_run:
2172
      self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
2173
        constants.JOB_IDS_KEY: [],
2174
        })
2175

    
2176
  def _ConstructPartialResult(self):
2177
    """Contructs the partial result.
2178

2179
    """
2180
    if self.op.iallocator:
2181
      (allocatable, failed_insts) = self.ia_result
2182
      allocatable_insts = map(compat.fst, allocatable)
2183
    else:
2184
      allocatable_insts = [op.instance_name for op in self.op.instances]
2185
      failed_insts = []
2186

    
2187
    return {
2188
      constants.ALLOCATABLE_KEY: allocatable_insts,
2189
      constants.FAILED_KEY: failed_insts,
2190
      }
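    # Illustrative result shape (hypothetical instance names):
    #   {constants.ALLOCATABLE_KEY: ["inst1.example.com"],
    #    constants.FAILED_KEY: ["inst2.example.com"]}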
2191

    
2192
  def Exec(self, feedback_fn):
2193
    """Executes the opcode.
2194

2195
    """
2196
    jobs = []
2197
    if self.op.iallocator:
2198
      op2inst = dict((op.instance_name, op) for op in self.op.instances)
2199
      (allocatable, failed) = self.ia_result
2200

    
2201
      for (name, node_names) in allocatable:
2202
        op = op2inst.pop(name)
2203

    
2204
        (op.pnode_uuid, op.pnode) = \
2205
          ExpandNodeUuidAndName(self.cfg, None, node_names[0])
2206
        if len(node_names) > 1:
2207
          (op.snode_uuid, op.snode) = \
2208
            ExpandNodeUuidAndName(self.cfg, None, node_names[1])
2209

    
2210
          jobs.append([op])
2211

    
2212
        missing = set(op2inst.keys()) - set(failed)
2213
        assert not missing, \
2214
          "Iallocator did return incomplete result: %s" % \
2215
          utils.CommaJoin(missing)
2216
    else:
2217
      jobs.extend([op] for op in self.op.instances)
2218

    
2219
    return ResultWithJobs(jobs, **self._ConstructPartialResult())
2220

    
2221

    
2222
class _InstNicModPrivate:
2223
  """Data structure for network interface modifications.
2224

2225
  Used by L{LUInstanceSetParams}.
2226

2227
  """
2228
  def __init__(self):
2229
    self.params = None
2230
    self.filled = None
2231

    
2232

    
2233
def _PrepareContainerMods(mods, private_fn):
2234
  """Prepares a list of container modifications by adding a private data field.
2235

2236
  @type mods: list of tuples; (operation, index, parameters)
2237
  @param mods: List of modifications
2238
  @type private_fn: callable or None
2239
  @param private_fn: Callable for constructing a private data field for a
2240
    modification
2241
  @rtype: list
2242

2243
  """
2244
  if private_fn is None:
2245
    fn = lambda: None
2246
  else:
2247
    fn = private_fn
2248

    
2249
  return [(op, idx, params, fn()) for (op, idx, params) in mods]
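# Illustrative sketch (not part of the original code): _PrepareContainerMods
# only appends a private-data slot to each modification, e.g.
#   mods = [(constants.DDM_ADD, -1, {constants.IDISK_SIZE: 1024})]
#   _PrepareContainerMods(mods, _InstNicModPrivate)
#   # -> [(constants.DDM_ADD, -1, {constants.IDISK_SIZE: 1024},
#   #      <_InstNicModPrivate instance>)]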
2250

    
2251

    
2252
def _CheckNodesPhysicalCPUs(lu, node_uuids, requested, hypervisor_specs):
2253
  """Checks if nodes have enough physical CPUs
2254

2255
  This function checks if all given nodes have the needed number of
2256
  physical CPUs. In case any node has less CPUs or we cannot get the
2257
  information from the node, this function raises an OpPrereqError
2258
  exception.
2259

2260
  @type lu: C{LogicalUnit}
2261
  @param lu: a logical unit from which we get configuration data
2262
  @type node_uuids: C{list}
2263
  @param node_uuids: the list of node UUIDs to check
2264
  @type requested: C{int}
2265
  @param requested: the minimum acceptable number of physical CPUs
2266
  @type hypervisor_specs: list of pairs (string, dict of strings)
2267
  @param hypervisor_specs: list of hypervisor specifications in
2268
      pairs (hypervisor_name, hvparams)
2269
  @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
2270
      or we cannot check the node
2271

2272
  """
2273
  nodeinfo = lu.rpc.call_node_info(node_uuids, None, hypervisor_specs)
2274
  for node_uuid in node_uuids:
2275
    info = nodeinfo[node_uuid]
2276
    node_name = lu.cfg.GetNodeName(node_uuid)
2277
    info.Raise("Cannot get current information from node %s" % node_name,
2278
               prereq=True, ecode=errors.ECODE_ENVIRON)
2279
    (_, _, (hv_info, )) = info.payload
2280
    num_cpus = hv_info.get("cpu_total", None)
2281
    if not isinstance(num_cpus, int):
2282
      raise errors.OpPrereqError("Can't compute the number of physical CPUs"
2283
                                 " on node %s, result was '%s'" %
2284
                                 (node_name, num_cpus), errors.ECODE_ENVIRON)
2285
    if requested > num_cpus:
2286
      raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
2287
                                 "required" % (node_name, num_cpus, requested),
2288
                                 errors.ECODE_NORES)
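# Illustrative call (hypothetical values): hypervisor_specs is a list of
# (hypervisor_name, hvparams) pairs, so a typical invocation could be
#   specs = [(constants.HT_KVM, cluster.hvparams[constants.HT_KVM])]
#   _CheckNodesPhysicalCPUs(self, [node.uuid], 4, specs)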
2289

    
2290

    
2291
def GetItemFromContainer(identifier, kind, container):
2292
  """Return the item refered by the identifier.
2293

2294
  @type identifier: string
2295
  @param identifier: Item index or name or UUID
2296
  @type kind: string
2297
  @param kind: One-word item description
2298
  @type container: list
2299
  @param container: Container to get the item from
2300

2301
  """
2302
  # Index
2303
  try:
2304
    idx = int(identifier)
2305
    if idx == -1:
2306
      # Append
2307
      absidx = len(container) - 1
2308
    elif idx < 0:
2309
      raise IndexError("Not accepting negative indices other than -1")
2310
    elif idx > len(container):
2311
      raise IndexError("Got %s index %s, but there are only %s" %
2312
                       (kind, idx, len(container)))
2313
    else:
2314
      absidx = idx
2315
    return (absidx, container[idx])
2316
  except ValueError:
2317
    pass
2318

    
2319
  for idx, item in enumerate(container):
2320
    if item.uuid == identifier or item.name == identifier:
2321
      return (idx, item)
2322

    
2323
  raise errors.OpPrereqError("Cannot find %s with identifier %s" %
2324
                             (kind, identifier), errors.ECODE_NOENT)
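# Illustrative usage (hypothetical container): the identifier may be an index
# ("0", or "-1" for the last item), a name or a UUID, e.g.
#   GetItemFromContainer("-1", "disk", instance.disks)         # last disk
#   GetItemFromContainer("data-disk", "disk", instance.disks)  # by name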
2325

    
2326

    
2327
def _ApplyContainerMods(kind, container, chgdesc, mods,
2328
                        create_fn, modify_fn, remove_fn,
2329
                        post_add_fn=None):
2330
  """Applies descriptions in C{mods} to C{container}.
2331

2332
  @type kind: string
2333
  @param kind: One-word item description
2334
  @type container: list
2335
  @param container: Container to modify
2336
  @type chgdesc: None or list
2337
  @param chgdesc: List of applied changes
2338
  @type mods: list
2339
  @param mods: Modifications as returned by L{_PrepareContainerMods}
2340
  @type create_fn: callable
2341
  @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
2342
    receives absolute item index, parameters and private data object as added
2343
    by L{_PrepareContainerMods}, returns tuple containing new item and changes
2344
    as list
2345
  @type modify_fn: callable
2346
  @param modify_fn: Callback for modifying an existing item
2347
    (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
2348
    and private data object as added by L{_PrepareContainerMods}, returns
2349
    changes as list
2350
  @type remove_fn: callable
2351
  @param remove_fn: Callback on removing item; receives absolute item index,
2352
    item and private data object as added by L{_PrepareContainerMods}
2353
  @type post_add_fn: callable
2354
  @param post_add_fn: Callable for post-processing a newly created item after
2355
    it has been put into the container. It receives the index of the new item
2356
    and the new item as parameters.
2357

2358
  """
2359
  for (op, identifier, params, private) in mods:
2360
    changes = None
2361

    
2362
    if op == constants.DDM_ADD:
2363
      # Calculate where item will be added
2364
      # When adding an item, identifier can only be an index
2365
      try:
2366
        idx = int(identifier)
2367
      except ValueError:
2368
        raise errors.OpPrereqError("Only possitive integer or -1 is accepted as"
2369
                                   " identifier for %s" % constants.DDM_ADD,
2370
                                   errors.ECODE_INVAL)
2371
      if idx == -1:
2372
        addidx = len(container)
2373
      else:
2374
        if idx < 0:
2375
          raise IndexError("Not accepting negative indices other than -1")
2376
        elif idx > len(container):
2377
          raise IndexError("Got %s index %s, but there are only %s" %
2378
                           (kind, idx, len(container)))
2379
        addidx = idx
2380

    
2381
      if create_fn is None:
2382
        item = params
2383
      else:
2384
        (item, changes) = create_fn(addidx, params, private)
2385

    
2386
      if idx == -1:
2387
        container.append(item)
2388
      else:
2389
        assert idx >= 0
2390
        assert idx <= len(container)
2391
        # list.insert does so before the specified index
2392
        container.insert(idx, item)
2393

    
2394
      if post_add_fn is not None:
2395
        post_add_fn(addidx, item)
2396

    
2397
    else:
2398
      # Retrieve existing item
2399
      (absidx, item) = GetItemFromContainer(identifier, kind, container)
2400

    
2401
      if op == constants.DDM_REMOVE:
2402
        assert not params
2403

    
2404
        changes = [("%s/%s" % (kind, absidx), "remove")]
2405

    
2406
        if remove_fn is not None:
2407
          msg = remove_fn(absidx, item, private)
2408
          if msg:
2409
            changes.append(("%s/%s" % (kind, absidx), msg))
2410

    
2411
        assert container[absidx] == item
2412
        del container[absidx]
2413
      elif op == constants.DDM_MODIFY:
2414
        if modify_fn is not None:
2415
          changes = modify_fn(absidx, item, params, private)
2416
      else:
2417
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2418

    
2419
    assert _TApplyContModsCbChanges(changes)
2420

    
2421
    if not (chgdesc is None or changes is None):
2422
      chgdesc.extend(changes)
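# Illustrative sketch (hypothetical callback names): adding a NIC at the end
# of the container and collecting change descriptions could look like
#   chgdesc = []
#   mods = _PrepareContainerMods([(constants.DDM_ADD, -1, params)], None)
#   _ApplyContainerMods("NIC", instance.nics, chgdesc, mods,
#                       create_nic_fn, None, None)
# where create_nic_fn(idx, params, private) returns (new_nic, changes).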
2423

    
2424

    
2425
def _UpdateIvNames(base_index, disks):
2426
  """Updates the C{iv_name} attribute of disks.
2427

2428
  @type disks: list of L{objects.Disk}
2429

2430
  """
2431
  for (idx, disk) in enumerate(disks):
2432
    disk.iv_name = "disk/%s" % (base_index + idx, )
2433

    
2434

    
2435
class LUInstanceSetParams(LogicalUnit):
2436
  """Modifies an instances's parameters.
2437

2438
  """
2439
  HPATH = "instance-modify"
2440
  HTYPE = constants.HTYPE_INSTANCE
2441
  REQ_BGL = False
2442

    
2443
  @staticmethod
2444
  def _UpgradeDiskNicMods(kind, mods, verify_fn):
2445
    assert ht.TList(mods)
2446
    assert not mods or len(mods[0]) in (2, 3)
2447

    
2448
    if mods and len(mods[0]) == 2:
2449
      result = []
2450

    
2451
      addremove = 0
2452
      for op, params in mods:
2453
        if op in (constants.DDM_ADD, constants.DDM_REMOVE):
2454
          result.append((op, -1, params))
2455
          addremove += 1
2456

    
2457
          if addremove > 1:
2458
            raise errors.OpPrereqError("Only one %s add or remove operation is"
2459
                                       " supported at a time" % kind,
2460
                                       errors.ECODE_INVAL)
2461
        else:
2462
          result.append((constants.DDM_MODIFY, op, params))
2463

    
2464
      assert verify_fn(result)
2465
    else:
2466
      result = mods
2467

    
2468
    return result
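  # Illustrative conversion (sketch): the legacy 2-tuple syntax is upgraded
  # to the (op, identifier, params) form, e.g.
  #   [(constants.DDM_ADD, {"size": 1024})]
  #     -> [(constants.DDM_ADD, -1, {"size": 1024})]
  #   [("2", {"mode": "rw"})]
  #     -> [(constants.DDM_MODIFY, "2", {"mode": "rw"})]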
2469

    
2470
  @staticmethod
2471
  def _CheckMods(kind, mods, key_types, item_fn):
2472
    """Ensures requested disk/NIC modifications are valid.
2473

2474
    """
2475
    for (op, _, params) in mods:
2476
      assert ht.TDict(params)
2477

    
2478
      # If 'key_types' is an empty dict, we assume we have an
2479
      # 'ext' template and thus do not ForceDictType
2480
      if key_types:
2481
        utils.ForceDictType(params, key_types)
2482

    
2483
      if op == constants.DDM_REMOVE:
2484
        if params:
2485
          raise errors.OpPrereqError("No settings should be passed when"
2486
                                     " removing a %s" % kind,
2487
                                     errors.ECODE_INVAL)
2488
      elif op in (constants.DDM_ADD, constants.DDM_MODIFY):
2489
        item_fn(op, params)
2490
      else:
2491
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2492

    
2493
  def _VerifyDiskModification(self, op, params, excl_stor):
2494
    """Verifies a disk modification.
2495

2496
    """
2497
    if op == constants.DDM_ADD:
2498
      mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
2499
      if mode not in constants.DISK_ACCESS_SET:
2500
        raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
2501
                                   errors.ECODE_INVAL)
2502

    
2503
      size = params.get(constants.IDISK_SIZE, None)
2504
      if size is None:
2505
        raise errors.OpPrereqError("Required disk parameter '%s' missing" %
2506
                                   constants.IDISK_SIZE, errors.ECODE_INVAL)
2507
      size = int(size)
2508

    
2509
      params[constants.IDISK_SIZE] = size
2510
      name = params.get(constants.IDISK_NAME, None)
2511
      if name is not None and name.lower() == constants.VALUE_NONE:
2512
        params[constants.IDISK_NAME] = None
2513

    
2514
      CheckSpindlesExclusiveStorage(params, excl_stor, True)
2515

    
2516
    elif op == constants.DDM_MODIFY:
2517
      if constants.IDISK_SIZE in params:
2518
        raise errors.OpPrereqError("Disk size change not possible, use"
2519
                                   " grow-disk", errors.ECODE_INVAL)
2520

    
2521
      # Disk modification supports changing only the disk name and mode.
2522
      # Changing arbitrary parameters is allowed only for the ext disk template.
2523
      if self.instance.disk_template != constants.DT_EXT:
2524
        utils.ForceDictType(params, constants.MODIFIABLE_IDISK_PARAMS_TYPES)
2525

    
2526
      name = params.get(constants.IDISK_NAME, None)
2527
      if name is not None and name.lower() == constants.VALUE_NONE:
2528
        params[constants.IDISK_NAME] = None
2529

    
2530
  @staticmethod
  def _VerifyNicModification(op, params):
    """Verifies a network interface modification.

    """
    if op in (constants.DDM_ADD, constants.DDM_MODIFY):
      ip = params.get(constants.INIC_IP, None)
      name = params.get(constants.INIC_NAME, None)
      req_net = params.get(constants.INIC_NETWORK, None)
      link = params.get(constants.NIC_LINK, None)
      mode = params.get(constants.NIC_MODE, None)
      if name is not None and name.lower() == constants.VALUE_NONE:
        params[constants.INIC_NAME] = None
      if req_net is not None:
        if req_net.lower() == constants.VALUE_NONE:
          params[constants.INIC_NETWORK] = None
          req_net = None
        elif link is not None or mode is not None:
          raise errors.OpPrereqError("If network is given, mode or link"
                                     " should not be set",
                                     errors.ECODE_INVAL)

      if op == constants.DDM_ADD:
        macaddr = params.get(constants.INIC_MAC, None)
        if macaddr is None:
          params[constants.INIC_MAC] = constants.VALUE_AUTO

      if ip is not None:
        if ip.lower() == constants.VALUE_NONE:
          params[constants.INIC_IP] = None
        else:
          if ip.lower() == constants.NIC_IP_POOL:
            if op == constants.DDM_ADD and req_net is None:
              raise errors.OpPrereqError("If ip=pool, parameter network"
                                         " cannot be none",
                                         errors.ECODE_INVAL)
          else:
            if not netutils.IPAddress.IsValid(ip):
              raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                         errors.ECODE_INVAL)

      if constants.INIC_MAC in params:
        macaddr = params[constants.INIC_MAC]
        if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
          macaddr = utils.NormalizeAndValidateMac(macaddr)

        if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
                                     " modifying an existing NIC",
                                     errors.ECODE_INVAL)

  def CheckArguments(self):
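    """Check the static validity of the requested modifications.

    Ensures that at least one change was submitted and upgrades the
    legacy disk/NIC modification syntax before the per-item checks.

    """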
    if not (self.op.nics or self.op.disks or self.op.disk_template or
            self.op.hvparams or self.op.beparams or self.op.os_name or
            self.op.osparams or self.op.offline is not None or
            self.op.runtime_mem or self.op.pnode or self.op.osparams_private or
            self.op.instance_communication is not None):
      raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)

    if self.op.hvparams:
      CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
                           "hypervisor", "instance", "cluster")

    self.op.disks = self._UpgradeDiskNicMods(
      "disk", self.op.disks, ht.TSetParamsMods(ht.TIDiskParams))
    self.op.nics = self._UpgradeDiskNicMods(
      "NIC", self.op.nics, ht.TSetParamsMods(ht.TINicParams))

    if self.op.disks and self.op.disk_template is not None:
      raise errors.OpPrereqError("Disk template conversion and other disk"
                                 " changes not supported at the same time",
                                 errors.ECODE_INVAL)

    if (self.op.disk_template and
        self.op.disk_template in constants.DTS_INT_MIRROR and
        self.op.remote_node is None):
      raise errors.OpPrereqError("Changing the disk template to a mirrored"
                                 " one requires specifying a secondary node",
                                 errors.ECODE_INVAL)

    # Check NIC modifications
    self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
                    self._VerifyNicModification)

    if self.op.pnode:
      (self.op.pnode_uuid, self.op.pnode) = \
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)

  def ExpandNames(self):
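    """Expand the instance name and prepare the lock containers.

    """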
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODEGROUP] = []
    # Can't even acquire node locks in shared mode as upcoming changes in
    # Ganeti 2.6 will start to modify the node object on disk conversion
    self.needed_locks[locking.LEVEL_NODE] = []
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
    # Lock the node group to look up the ipolicy
    self.share_locks[locking.LEVEL_NODEGROUP] = 1

  def DeclareLocks(self, level):
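    """Declare the node group and node locks.

    Node group locks are acquired optimistically and verified later in
    CheckPrereq; the requested secondary node is added to the node
    locks when a disk template conversion was requested.

    """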
    if level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
      # Acquire locks for the instance's nodegroups optimistically. Needs
      # to be verified in CheckPrereq
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
        self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
    elif level == locking.LEVEL_NODE:
      self._LockInstancesNodes()
      if self.op.disk_template and self.op.remote_node:
        (self.op.remote_node_uuid, self.op.remote_node) = \
          ExpandNodeUuidAndName(self.cfg, self.op.remote_node_uuid,
                                self.op.remote_node)
        self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node_uuid)
    elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, primary and secondaries.

    """
    args = {}
    if constants.BE_MINMEM in self.be_new:
      args["minmem"] = self.be_new[constants.BE_MINMEM]
    if constants.BE_MAXMEM in self.be_new:
      args["maxmem"] = self.be_new[constants.BE_MAXMEM]
    if constants.BE_VCPUS in self.be_new:
      args["vcpus"] = self.be_new[constants.BE_VCPUS]
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
    # information at all.

    if self._new_nics is not None:
      nics = []

      for nic in self._new_nics:
        n = copy.deepcopy(nic)
        nicparams = self.cluster.SimpleFillNIC(n.nicparams)
        n.nicparams = nicparams
        nics.append(NICToTuple(self, n))

      args["nics"] = nics

    env = BuildInstanceHookEnvByObject(self, self.instance, override=args)
    if self.op.disk_template:
      env["NEW_DISK_TEMPLATE"] = self.op.disk_template
    if self.op.runtime_mem:
      env["RUNTIME_MEMORY"] = self.op.runtime_mem

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)

  def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
                              old_params, cluster, pnode_uuid):
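    """Prepare and validate the changes for a single NIC.

    Merges the requested parameters with the old ones (or with the
    netparams of the target network), validates the result and
    reserves or releases MAC and IP addresses as needed; the computed
    values are stored on the C{private} object for later use.

    """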

    update_params_dict = dict([(key, params[key])
                               for key in constants.NICS_PARAMETERS
                               if key in params])

    req_link = update_params_dict.get(constants.NIC_LINK, None)
    req_mode = update_params_dict.get(constants.NIC_MODE, None)

    new_net_uuid = None
    new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid)
    if new_net_uuid_or_name:
      new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name)
      new_net_obj = self.cfg.GetNetwork(new_net_uuid)

    if old_net_uuid:
      old_net_obj = self.cfg.GetNetwork(old_net_uuid)

    if new_net_uuid:
      netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode_uuid)
      if not netparams:
        raise errors.OpPrereqError("No netparams found for the network"
                                   " %s, probably not connected" %
                                   new_net_obj.name, errors.ECODE_INVAL)
      new_params = dict(netparams)
    else:
      new_params = GetUpdatedParams(old_params, update_params_dict)

    utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)

    new_filled_params = cluster.SimpleFillNIC(new_params)
    objects.NIC.CheckParameterSyntax(new_filled_params)

    new_mode = new_filled_params[constants.NIC_MODE]
    if new_mode == constants.NIC_MODE_BRIDGED:
      bridge = new_filled_params[constants.NIC_LINK]
      msg = self.rpc.call_bridges_exist(pnode_uuid, [bridge]).fail_msg
      if msg:
        msg = "Error checking bridges on node '%s': %s" % \
                (self.cfg.GetNodeName(pnode_uuid), msg)
        if self.op.force:
          self.warn.append(msg)
        else:
          raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)

    elif new_mode == constants.NIC_MODE_ROUTED:
      ip = params.get(constants.INIC_IP, old_ip)
      if ip is None:
        raise errors.OpPrereqError("Cannot set the NIC IP address to None"
                                   " on a routed NIC", errors.ECODE_INVAL)

    elif new_mode == constants.NIC_MODE_OVS:
      # TODO: check OVS link
      self.LogInfo("OVS links are currently not checked for correctness")

    if constants.INIC_MAC in params:
      mac = params[constants.INIC_MAC]
      if mac is None:
        raise errors.OpPrereqError("Cannot unset the NIC MAC address",
                                   errors.ECODE_INVAL)
      elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        # otherwise generate the MAC address
        params[constants.INIC_MAC] = \
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
      else:
        # or validate/reserve the current one
        try:
          self.cfg.ReserveMAC(mac, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("MAC address '%s' already in use"
                                     " in cluster" % mac,
                                     errors.ECODE_NOTUNIQUE)
    elif new_net_uuid != old_net_uuid:

      def get_net_prefix(net_uuid):
        mac_prefix = None
        if net_uuid:
          nobj = self.cfg.GetNetwork(net_uuid)
          mac_prefix = nobj.mac_prefix

        return mac_prefix

      new_prefix = get_net_prefix(new_net_uuid)
      old_prefix = get_net_prefix(old_net_uuid)
      if old_prefix != new_prefix:
        params[constants.INIC_MAC] = \
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())

    # if there is a change in (ip, network) tuple
    new_ip = params.get(constants.INIC_IP, old_ip)
    if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
      if new_ip:
        # if IP is pool then require a network and generate one IP
        if new_ip.lower() == constants.NIC_IP_POOL:
          if new_net_uuid:
            try:
              new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("Unable to get a free IP"
                                         " from the address pool",
                                         errors.ECODE_STATE)
            self.LogInfo("Chose IP %s from network %s",
                         new_ip,
                         new_net_obj.name)
            params[constants.INIC_IP] = new_ip
          else:
            raise errors.OpPrereqError("ip=pool, but no network found",
                                       errors.ECODE_INVAL)
        # Reserve the new IP in the new network, if any
        elif new_net_uuid:
          try:
            self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId(),
                               check=self.op.conflicts_check)
            self.LogInfo("Reserving IP %s in network %s",
                         new_ip, new_net_obj.name)
          except errors.ReservationError:
            raise errors.OpPrereqError("IP %s not available in network %s" %
                                       (new_ip, new_net_obj.name),
                                       errors.ECODE_NOTUNIQUE)
        # new network is None so check if new IP is a conflicting IP
        elif self.op.conflicts_check:
          _CheckForConflictingIp(self, new_ip, pnode_uuid)

      # release old IP if old network is not None
      if old_ip and old_net_uuid:
        try:
          self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId())
        except errors.AddressPoolError:
          logging.warning("Release IP %s not contained in network %s",
                          old_ip, old_net_obj.name)

    # there are no changes in (ip, network) tuple and old network is not None
    elif (old_net_uuid is not None and
          (req_link is not None or req_mode is not None)):
      raise errors.OpPrereqError("Not allowed to change link or mode of"
                                 " a NIC that is connected to a network",
                                 errors.ECODE_INVAL)

    private.params = new_params
    private.filled = new_filled_params

  def _PreCheckDiskTemplate(self, pnode_info):
    """CheckPrereq checks related to a new disk template."""
    # Arguments are passed to avoid configuration lookups
    pnode_uuid = self.instance.primary_node
    if self.instance.disk_template == self.op.disk_template:
      raise errors.OpPrereqError("Instance already has disk template %s" %
                                 self.instance.disk_template,
                                 errors.ECODE_INVAL)

    if not self.cluster.IsDiskTemplateEnabled(self.op.disk_template):
      raise errors.OpPrereqError("Disk template '%s' is not enabled for this"
                                 " cluster." % self.op.disk_template)

    if (self.instance.disk_template,
        self.op.disk_template) not in self._DISK_CONVERSIONS:
      raise errors.OpPrereqError("Unsupported disk template conversion from"
                                 " %s to %s" % (self.instance.disk_template,
                                                self.op.disk_template),
                                 errors.ECODE_INVAL)
    CheckInstanceState(self, self.instance, INSTANCE_DOWN,
                       msg="cannot change disk template")
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.remote_node_uuid == pnode_uuid:
        raise errors.OpPrereqError("Given new secondary node %s is the same"
                                   " as the primary node of the instance" %
                                   self.op.remote_node, errors.ECODE_STATE)
      CheckNodeOnline(self, self.op.remote_node_uuid)
      CheckNodeNotDrained(self, self.op.remote_node_uuid)
      # FIXME: here we assume that the old instance type is DT_PLAIN
      assert self.instance.disk_template == constants.DT_PLAIN
      disks = [{constants.IDISK_SIZE: d.size,
                constants.IDISK_VG: d.logical_id[0]}
               for d in self.instance.disks]
      required = ComputeDiskSizePerVG(self.op.disk_template, disks)
      CheckNodesFreeDiskPerVG(self, [self.op.remote_node_uuid], required)

      snode_info = self.cfg.GetNodeInfo(self.op.remote_node_uuid)
      snode_group = self.cfg.GetNodeGroup(snode_info.group)
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
                                                              snode_group)
      CheckTargetNodeIPolicy(self, ipolicy, self.instance, snode_info, self.cfg,
                             ignore=self.op.ignore_ipolicy)
      if pnode_info.group != snode_info.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    if not self.op.disk_template in constants.DTS_EXCL_STORAGE:
      # Make sure none of the nodes require exclusive storage
      nodes = [pnode_info]
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        assert snode_info
        nodes.append(snode_info)
      has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
      if compat.any(map(has_es, nodes)):
        errmsg = ("Cannot convert disk template from %s to %s when exclusive"
                  " storage is enabled" % (self.instance.disk_template,
                                           self.op.disk_template))
        raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)

  def _PreCheckDisks(self, ispec):
    """CheckPrereq checks related to disk changes.

    @type ispec: dict
    @param ispec: instance specs to be updated with the new disks

    """
    self.diskparams = self.cfg.GetInstanceDiskParams(self.instance)

    excl_stor = compat.any(
      rpc.GetExclusiveStorageForNodes(self.cfg,
                                      self.instance.all_nodes).values()
      )

    # Check disk modifications. This is done here and not in CheckArguments
    # (as with NICs), because we need to know the instance's disk template
    ver_fn = lambda op, par: self._VerifyDiskModification(op, par, excl_stor)
    if self.instance.disk_template == constants.DT_EXT:
      self._CheckMods("disk", self.op.disks, {}, ver_fn)
    else:
      self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
                      ver_fn)

    self.diskmod = _PrepareContainerMods(self.op.disks, None)

    # Check the validity of the `provider' parameter
    if self.instance.disk_template in constants.DT_EXT:
      for mod in self.diskmod:
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
        if mod[0] == constants.DDM_ADD:
          if ext_provider is None:
            raise errors.OpPrereqError("Instance template is '%s' and parameter"
                                       " '%s' missing, during disk add" %
                                       (constants.DT_EXT,
                                        constants.IDISK_PROVIDER),
                                       errors.ECODE_NOENT)
        elif mod[0] == constants.DDM_MODIFY:
          if ext_provider:
            raise errors.OpPrereqError("Parameter '%s' is invalid during disk"
                                       " modification" %
                                       constants.IDISK_PROVIDER,
                                       errors.ECODE_INVAL)
    else:
      for mod in self.diskmod:
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
        if ext_provider is not None:
          raise errors.OpPrereqError("Parameter '%s' is only valid for"
                                     " instances of type '%s'" %
                                     (constants.IDISK_PROVIDER,
                                      constants.DT_EXT),
                                     errors.ECODE_INVAL)

    if not self.op.wait_for_sync and self.instance.disks_active:
      for mod in self.diskmod:
        if mod[0] == constants.DDM_ADD:
          raise errors.OpPrereqError("Can't add a disk to an instance with"
                                     " activated disks and"
                                     " --no-wait-for-sync given.",
                                     errors.ECODE_INVAL)

    if self.op.disks and self.instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Disk operations not supported for"
                                 " diskless instances", errors.ECODE_INVAL)

    def _PrepareDiskMod(_, disk, params, __):
      disk.name = params.get(constants.IDISK_NAME, None)

    # Verify disk changes (operating on a copy)
    disks = copy.deepcopy(self.instance.disks)
    _ApplyContainerMods("disk", disks, None, self.diskmod, None,
                        _PrepareDiskMod, None)
    utils.ValidateDeviceNames("disk", disks)
    if len(disks) > constants.MAX_DISKS:
      raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
                                 " more" % constants.MAX_DISKS,
                                 errors.ECODE_STATE)
    disk_sizes = [disk.size for disk in self.instance.disks]
    disk_sizes.extend(params["size"] for (op, idx, params, private) in
                      self.diskmod if op == constants.DDM_ADD)
    ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
    ispec[constants.ISPEC_DISK_SIZE] = disk_sizes

    if self.op.offline is not None and self.op.offline:
      CheckInstanceState(self, self.instance, CAN_CHANGE_INSTANCE_OFFLINE,
                         msg="can't change to offline")

  @staticmethod
  def _InstanceCommunicationDDM(cfg, instance_communication, instance):
    """Create a NIC mod that adds or removes the instance
    communication NIC of a running instance.

    The NICS are dynamically created using the Dynamic Device
    Modification (DDM).  This function produces a NIC modification
    (mod) that inserts an additional NIC meant for instance
    communication in or removes an existing instance communication NIC
    from a running instance, using DDM.

    @type cfg: L{config.ConfigWriter}
    @param cfg: cluster configuration

    @type instance_communication: boolean
    @param instance_communication: whether instance communication is
                                   enabled or disabled

    @type instance: L{objects.Instance}
    @param instance: instance to which the NIC mod will be applied

    @rtype: (L{constants.DDM_ADD}, -1, parameters) or
            (L{constants.DDM_REMOVE}, -1, parameters) or
            L{None}
    @return: DDM mod containing an action to add or remove the NIC, or
             None if nothing needs to be done

    """
    nic_name = _ComputeInstanceCommunicationNIC(instance.name)

    instance_communication_nic = None

    for nic in instance.nics:
      if nic.name == nic_name:
        instance_communication_nic = nic
        break

    if instance_communication and not instance_communication_nic:
      action = constants.DDM_ADD
      params = {constants.INIC_NAME: nic_name,
                constants.INIC_MAC: constants.VALUE_GENERATE,
                constants.INIC_IP: constants.NIC_IP_POOL,
                constants.INIC_NETWORK:
                  cfg.GetInstanceCommunicationNetwork()}
    elif not instance_communication and instance_communication_nic:
      action = constants.DDM_REMOVE
      params = None
    else:
      action = None
      params = None

    if action is not None:
      return (action, -1, params)
    else:
      return None

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the requested modifications against the instance's
    current state and the cluster configuration.

    """
    assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    self.cluster = self.cfg.GetClusterInfo()
    cluster_hvparams = self.cluster.hvparams[self.instance.hypervisor]

    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    pnode_uuid = self.instance.primary_node

    self.warn = []

    if (self.op.pnode_uuid is not None and self.op.pnode_uuid != pnode_uuid and
        not self.op.force):
      # verify that the instance is not up
      instance_info = self.rpc.call_instance_info(
          pnode_uuid, self.instance.name, self.instance.hypervisor,
          cluster_hvparams)
      if instance_info.fail_msg:
        self.warn.append("Can't get instance runtime information: %s" %
                         instance_info.fail_msg)
      elif instance_info.payload:
        raise errors.OpPrereqError("Instance is still running on %s" %
                                   self.cfg.GetNodeName(pnode_uuid),
                                   errors.ECODE_STATE)

    assert pnode_uuid in self.owned_locks(locking.LEVEL_NODE)
    node_uuids = list(self.instance.all_nodes)
    pnode_info = self.cfg.GetNodeInfo(pnode_uuid)

    #_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
    assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
    group_info = self.cfg.GetNodeGroup(pnode_info.group)

    # dictionary with instance information after the modification
    ispec = {}

    if self.op.hotplug or self.op.hotplug_if_possible:
      result = self.rpc.call_hotplug_supported(self.instance.primary_node,
                                               self.instance)
      if result.fail_msg:
        if self.op.hotplug:
          result.Raise("Hotplug is not possible: %s" % result.fail_msg,
                       prereq=True)
        else:
          self.LogWarning(result.fail_msg)
          self.op.hotplug = False
          self.LogInfo("Modification will take place without hotplugging.")
      else:
        self.op.hotplug = True

    # Prepare NIC modifications
    # add or remove NIC for instance communication
    if self.op.instance_communication is not None:
      mod = self._InstanceCommunicationDDM(self.cfg,
                                           self.op.instance_communication,
                                           self.instance)
      if mod is not None:
        self.op.nics.append(mod)

    self.nicmod = _PrepareContainerMods(self.op.nics, _InstNicModPrivate)

    # OS change
    if self.op.os_name and not self.op.force:
      CheckNodeHasOS(self, self.instance.primary_node, self.op.os_name,
                     self.op.force_variant)
      instance_os = self.op.os_name
    else:
      instance_os = self.instance.os

    assert not (self.op.disk_template and self.op.disks), \
      "Can't modify disk template and apply disk changes at the same time"

    if self.op.disk_template:
      self._PreCheckDiskTemplate(pnode_info)

    self._PreCheckDisks(ispec)

    # hvparams processing
    if self.op.hvparams:
      hv_type = self.instance.hypervisor
      i_hvdict = GetUpdatedParams(self.instance.hvparams, self.op.hvparams)
      utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
      hv_new = self.cluster.SimpleFillHV(hv_type, self.instance.os, i_hvdict)

      # local check
      hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
      CheckHVParams(self, node_uuids, self.instance.hypervisor, hv_new)
      self.hv_proposed = self.hv_new = hv_new # the new actual values
      self.hv_inst = i_hvdict # the new dict (without defaults)
    else:
      self.hv_proposed = self.cluster.SimpleFillHV(self.instance.hypervisor,
                                                   self.instance.os,
                                                   self.instance.hvparams)
      self.hv_new = self.hv_inst = {}

    # beparams processing
    if self.op.beparams:
      i_bedict = GetUpdatedParams(self.instance.beparams, self.op.beparams,
                                  use_none=True)
      objects.UpgradeBeParams(i_bedict)
      utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
      be_new = self.cluster.SimpleFillBE(i_bedict)
      self.be_proposed = self.be_new = be_new # the new actual values
      self.be_inst = i_bedict # the new dict (without defaults)
    else:
      self.be_new = self.be_inst = {}
      self.be_proposed = self.cluster.SimpleFillBE(self.instance.beparams)
    be_old = self.cluster.FillBE(self.instance)

    # CPU param validation -- checking every time a parameter is
    # changed to cover all cases where either CPU mask or vcpus have
    # changed
    if (constants.BE_VCPUS in self.be_proposed and
        constants.HV_CPU_MASK in self.hv_proposed):
      cpu_list = \
        utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
      # Verify mask is consistent with number of vCPUs. Can skip this
      # test if only 1 entry in the CPU mask, which means same mask
      # is applied to all vCPUs.
      if (len(cpu_list) > 1 and
          len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
        raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
                                   " CPU mask [%s]" %
                                   (self.be_proposed[constants.BE_VCPUS],
                                    self.hv_proposed[constants.HV_CPU_MASK]),
                                   errors.ECODE_INVAL)

      # Only perform this test if a new CPU mask is given
      if constants.HV_CPU_MASK in self.hv_new:
        # Calculate the largest CPU number requested
        max_requested_cpu = max(map(max, cpu_list))
        # Check that all of the instance's nodes have enough physical CPUs to
        # satisfy the requested CPU mask
        hvspecs = [(self.instance.hypervisor,
                    self.cfg.GetClusterInfo()
                      .hvparams[self.instance.hypervisor])]
        _CheckNodesPhysicalCPUs(self, self.instance.all_nodes,
                                max_requested_cpu + 1,
                                hvspecs)

    # osparams processing
    if self.op.osparams or self.op.osparams_private:
      public_parms = self.op.osparams or {}
      private_parms = self.op.osparams_private or {}
      dupe_keys = utils.GetRepeatedKeys(public_parms, private_parms)

      if dupe_keys:
        raise errors.OpPrereqError("OS parameters repeated multiple times: %s" %
                                   utils.CommaJoin(dupe_keys))

      self.os_inst = GetUpdatedParams(self.instance.osparams,
                                      public_parms)
      self.os_inst_private = GetUpdatedParams(self.instance.osparams_private,
                                              private_parms)

      CheckOSParams(self, True, node_uuids, instance_os,
                    objects.FillDict(self.os_inst,
                                     self.os_inst_private))

    else:
      self.os_inst = {}
      self.os_inst_private = {}

    #TODO(dynmem): do the appropriate check involving MINMEM
    if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
        be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
      mem_check_list = [pnode_uuid]
      if be_new[constants.BE_AUTO_BALANCE]:
        # either we changed auto_balance to yes or it was from before
        mem_check_list.extend(self.instance.secondary_nodes)
      instance_info = self.rpc.call_instance_info(
          pnode_uuid, self.instance.name, self.instance.hypervisor,
          cluster_hvparams)
      hvspecs = [(self.instance.hypervisor,
                  cluster_hvparams)]
      nodeinfo = self.rpc.call_node_info(mem_check_list, None,
                                         hvspecs)
      pninfo = nodeinfo[pnode_uuid]
      msg = pninfo.fail_msg
      if msg:
        # Assume the primary node is unreachable and go ahead
        self.warn.append("Can't get info from primary node %s: %s" %
                         (self.cfg.GetNodeName(pnode_uuid), msg))
      else:
        (_, _, (pnhvinfo, )) = pninfo.payload
        if not isinstance(pnhvinfo.get("memory_free", None), int):
          self.warn.append("Node data from primary node %s doesn't contain"
                           " free memory information" %
                           self.cfg.GetNodeName(pnode_uuid))
        elif instance_info.fail_msg:
          self.warn.append("Can't get instance runtime information: %s" %
                           instance_info.fail_msg)
        else:
          if instance_info.payload:
            current_mem = int(instance_info.payload["memory"])
          else:
            # Assume instance not running
            # (there is a slight race condition here, but it's not very
            # probable, and we have no other way to check)
            # TODO: Describe race condition
            current_mem = 0
          #TODO(dynmem): do the appropriate check involving MINMEM
          miss_mem = (be_new[constants.BE_MAXMEM] - current_mem -
                      pnhvinfo["memory_free"])
          if miss_mem > 0:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from starting, due to %d MB of memory"
                                       " missing on its primary node" %
                                       miss_mem, errors.ECODE_NORES)

      if be_new[constants.BE_AUTO_BALANCE]:
        for node_uuid, nres in nodeinfo.items():
          if node_uuid not in self.instance.secondary_nodes:
            continue
          nres.Raise("Can't get info from secondary node %s" %
                     self.cfg.GetNodeName(node_uuid), prereq=True,
                     ecode=errors.ECODE_STATE)
          (_, _, (nhvinfo, )) = nres.payload
          if not isinstance(nhvinfo.get("memory_free", None), int):
            raise errors.OpPrereqError("Secondary node %s didn't return free"
                                       " memory information" %
                                       self.cfg.GetNodeName(node_uuid),
                                       errors.ECODE_STATE)
          #TODO(dynmem): do the appropriate check involving MINMEM
          elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from failover to its secondary node"
                                       " %s, due to not enough memory" %
                                       self.cfg.GetNodeName(node_uuid),
                                       errors.ECODE_STATE)

    if self.op.runtime_mem:
      remote_info = self.rpc.call_instance_info(
         self.instance.primary_node, self.instance.name,
         self.instance.hypervisor,
         cluster_hvparams)
      remote_info.Raise("Error checking node %s" %
                        self.cfg.GetNodeName(self.instance.primary_node))
      if not remote_info.payload: # not running already
        raise errors.OpPrereqError("Instance %s is not running" %
                                   self.instance.name, errors.ECODE_STATE)

      current_memory = remote_info.payload["memory"]
      if (not self.op.force and
           (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
            self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
        raise errors.OpPrereqError("Instance %s must have memory between %d"
                                   " and %d MB of memory unless --force is"
                                   " given" %
                                   (self.instance.name,
                                    self.be_proposed[constants.BE_MINMEM],
                                    self.be_proposed[constants.BE_MAXMEM]),
                                   errors.ECODE_INVAL)

      delta = self.op.runtime_mem - current_memory
      if delta > 0:
        CheckNodeFreeMemory(
            self, self.instance.primary_node,
            "ballooning memory for instance %s" % self.instance.name, delta,
            self.instance.hypervisor,
            self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])

    # make self.cluster visible in the functions below
    cluster = self.cluster

    def _PrepareNicCreate(_, params, private):
      self._PrepareNicModification(params, private, None, None,
                                   {}, cluster, pnode_uuid)
      return (None, None)

    def _PrepareNicMod(_, nic, params, private):
      self._PrepareNicModification(params, private, nic.ip, nic.network,
                                   nic.nicparams, cluster, pnode_uuid)
      return None

    def _PrepareNicRemove(_, params, __):
      ip = params.ip
      net = params.network
      if net is not None and ip is not None:
        self.cfg.ReleaseIp(net, ip, self.proc.GetECId())

    # Verify NIC changes (operating on copy)
    nics = [nic.Copy() for nic in self.instance.nics]
    _ApplyContainerMods("NIC", nics, None, self.nicmod,
                        _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
    if len(nics) > constants.MAX_NICS:
      raise errors.OpPrereqError("Instance has too many network interfaces"
                                 " (%d), cannot add more" % constants.MAX_NICS,
                                 errors.ECODE_STATE)

    # Pre-compute NIC changes (necessary to use result in hooks)
    self._nic_chgdesc = []
    if self.nicmod:
      # Operate on copies as this is still in prereq
      nics = [nic.Copy() for nic in self.instance.nics]
      _ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
                          self._CreateNewNic, self._ApplyNicMods,
                          self._RemoveNic)
      # Verify that NIC names are unique and valid
      utils.ValidateDeviceNames("NIC", nics)
      self._new_nics = nics
      ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
    else:
      self._new_nics = None
      ispec[constants.ISPEC_NIC_COUNT] = len(self.instance.nics)

    if not self.op.ignore_ipolicy:
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
                                                              group_info)

      # Fill ispec with backend parameters
      ispec[constants.ISPEC_SPINDLE_USE] = \
        self.be_new.get(constants.BE_SPINDLE_USE, None)
      ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS,
                                                         None)

      # Copy ispec to verify parameters with min/max values separately
      if self.op.disk_template:
        new_disk_template = self.op.disk_template
      else:
        new_disk_template = self.instance.disk_template
      ispec_max = ispec.copy()
      ispec_max[constants.ISPEC_MEM_SIZE] = \
        self.be_new.get(constants.BE_MAXMEM, None)
      res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,
                                                     new_disk_template)
      ispec_min = ispec.copy()
      ispec_min[constants.ISPEC_MEM_SIZE] = \
        self.be_new.get(constants.BE_MINMEM, None)
      res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,
                                                     new_disk_template)

      if (res_max or res_min):
        # FIXME: Improve error message by including information about whether
        # the upper or lower limit of the parameter fails the ipolicy.
        msg = ("Instance allocation to group %s (%s) violates policy: %s" %
               (group_info, group_info.name,
                utils.CommaJoin(set(res_max + res_min))))
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

  def _ConvertPlainToDrbd(self, feedback_fn):
    """Converts an instance from plain to drbd.

    """
    feedback_fn("Converting template to drbd")
    pnode_uuid = self.instance.primary_node
    snode_uuid = self.op.remote_node_uuid

    assert self.instance.disk_template == constants.DT_PLAIN

    # create a fake disk info for _GenerateDiskTemplate
    disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
                  constants.IDISK_VG: d.logical_id[0],
                  constants.IDISK_NAME: d.name}
                 for d in self.instance.disks]
    new_disks = GenerateDiskTemplate(self, self.op.disk_template,
                                     self.instance.uuid, pnode_uuid,
                                     [snode_uuid], disk_info, None, None, 0,
                                     feedback_fn, self.diskparams)
    anno_disks = rpc.AnnotateDiskParams(new_disks, self.diskparams)
    p_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, pnode_uuid)
    s_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, snode_uuid)
    info = GetInstanceInfoText(self.instance)
    feedback_fn("Creating additional volumes...")
    # first, create the missing data and meta devices
    for disk in anno_disks:
      # unfortunately this is... not too nice
      CreateSingleBlockDev(self, pnode_uuid, self.instance, disk.children[1],
                           info, True, p_excl_stor)
      for child in disk.children:
        CreateSingleBlockDev(self, snode_uuid, self.instance, child, info, True,
                             s_excl_stor)
    # at this stage, all new LVs have been created, we can rename the
    # old ones
    feedback_fn("Renaming original volumes...")
    rename_list = [(o, n.children[0].logical_id)
                   for (o, n) in zip(self.instance.disks, new_disks)]
    result = self.rpc.call_blockdev_rename(pnode_uuid, rename_list)
    result.Raise("Failed to rename original LVs")

    feedback_fn("Initializing DRBD devices...")
    # all child devices are in place, we can now create the DRBD devices
    try:
      for disk in anno_disks:
        for (node_uuid, excl_stor) in [(pnode_uuid, p_excl_stor),
                                       (snode_uuid, s_excl_stor)]:
          f_create = node_uuid == pnode_uuid
          CreateSingleBlockDev(self, node_uuid, self.instance, disk, info,
                               f_create, excl_stor)
    except errors.GenericError, e:
      feedback_fn("Initializing of DRBD devices failed;"
                  " renaming back original volumes...")
      rename_back_list = [(n.children[0], o.logical_id)
                          for (n, o) in zip(new_disks, self.instance.disks)]
      result = self.rpc.call_blockdev_rename(pnode_uuid, rename_back_list)
      result.Raise("Failed to rename LVs back after error %s" % str(e))
      raise

    # at this point, the instance has been modified
    self.instance.disk_template = constants.DT_DRBD8
    self.instance.disks = new_disks
    self.cfg.Update(self.instance, feedback_fn)

    # Release node locks while waiting for sync
    ReleaseLocks(self, locking.LEVEL_NODE)

    # disks are created, waiting for sync
    disk_abort = not WaitForSync(self, self.instance,
                                 oneshot=not self.op.wait_for_sync)
    if disk_abort:
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance, please cleanup manually")

    # Node resource locks will be released by caller

  def _ConvertDrbdToPlain(self, feedback_fn):
    """Converts an instance from drbd to plain.

    """
    assert len(self.instance.secondary_nodes) == 1
    assert self.instance.disk_template == constants.DT_DRBD8

    pnode_uuid = self.instance.primary_node
    snode_uuid = self.instance.secondary_nodes[0]
    feedback_fn("Converting template to plain")

    old_disks = AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
    new_disks = [d.children[0] for d in self.instance.disks]

    # copy over size, mode and name
    for parent, child in zip(old_disks, new_disks):
      child.size = parent.size
      child.mode = parent.mode
      child.name = parent.name

    # this is a DRBD disk, return its port to the pool
    # NOTE: this must be done right before the call to cfg.Update!
    for disk in old_disks:
      tcp_port = disk.logical_id[2]
      self.cfg.AddTcpUdpPort(tcp_port)

    # update instance structure
    self.instance.disks = new_disks
    self.instance.disk_template = constants.DT_PLAIN
    _UpdateIvNames(0, self.instance.disks)
    self.cfg.Update(self.instance, feedback_fn)

    # Release locks in case removing disks takes a while
    ReleaseLocks(self, locking.LEVEL_NODE)

    feedback_fn("Removing volumes on the secondary node...")
    for disk in old_disks:
      result = self.rpc.call_blockdev_remove(snode_uuid, (disk, self.instance))
      result.Warn("Could not remove block device %s on node %s,"
                  " continuing anyway" %
                  (disk.iv_name, self.cfg.GetNodeName(snode_uuid)),
                  self.LogWarning)

    feedback_fn("Removing unneeded volumes on the primary node...")
    for idx, disk in enumerate(old_disks):
      meta = disk.children[1]
      result = self.rpc.call_blockdev_remove(pnode_uuid, (meta, self.instance))
      result.Warn("Could not remove metadata for disk %d on node %s,"
                  " continuing anyway" %
                  (idx, self.cfg.GetNodeName(pnode_uuid)),
                  self.LogWarning)

  def _HotplugDevice(self, action, dev_type, device, extra, seq):
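    """Call the hotplug RPC for a single device and report the result.

    Failures are only logged as warnings; the returned string
    ("hotplug:done" or "hotplug:failed") is used in the change
    description.

    """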
    self.LogInfo("Trying to hotplug device...")
    msg = "hotplug:"
    result = self.rpc.call_hotplug_device(self.instance.primary_node,
                                          self.instance, action, dev_type,
                                          (device, self.instance),
                                          extra, seq)
    if result.fail_msg:
      self.LogWarning("Could not hotplug device: %s" % result.fail_msg)
      self.LogInfo("Continuing execution..")
      msg += "failed"
    else:
      self.LogInfo("Hotplug done.")
      msg += "done"
    return msg

  def _CreateNewDisk(self, idx, params, _):
    """Creates a new disk.

    """
    # add a new disk
    if self.instance.disk_template in constants.DTS_FILEBASED:
      (file_driver, file_path) = self.instance.disks[0].logical_id
      file_path = os.path.dirname(file_path)
    else:
      file_driver = file_path = None

    disk = \
      GenerateDiskTemplate(self, self.instance.disk_template,
                           self.instance.uuid, self.instance.primary_node,
                           self.instance.secondary_nodes, [params], file_path,
                           file_driver, idx, self.Log, self.diskparams)[0]

    new_disks = CreateDisks(self, self.instance, disks=[disk])

    if self.cluster.prealloc_wipe_disks:
      # Wipe new disk
      WipeOrCleanupDisks(self, self.instance,
                         disks=[(idx, disk, 0)],
                         cleanup=new_disks)

    changes = [
      ("disk/%d" % idx,
       "add:size=%s,mode=%s" % (disk.size, disk.mode)),
      ]
    if self.op.hotplug:
      result = self.rpc.call_blockdev_assemble(self.instance.primary_node,
                                               (disk, self.instance),
                                               self.instance.name, True, idx)
      if result.fail_msg:
        changes.append(("disk/%d" % idx, "assemble:failed"))
        self.LogWarning("Can't assemble newly created disk %d: %s",
                        idx, result.fail_msg)
      else:
        _, link_name = result.payload
        msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
                                  constants.HOTPLUG_TARGET_DISK,
                                  disk, link_name, idx)
        changes.append(("disk/%d" % idx, msg))

    return (disk, changes)

  def _PostAddDisk(self, _, disk):
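    """Wait for a newly added disk to sync and deactivate it again if
    the instance's disks are not supposed to be active.

    """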
    if not WaitForSync(self, self.instance, disks=[disk],
                       oneshot=not self.op.wait_for_sync):
      raise errors.OpExecError("Failed to sync disks of %s" %
                               self.instance.name)

    # the disk is active at this point, so deactivate it if the instance disks
    # are supposed to be inactive
    if not self.instance.disks_active:
      ShutdownInstanceDisks(self, self.instance, disks=[disk])

  def _ModifyDisk(self, idx, disk, params, _):
    """Modifies a disk.

    """
    changes = []
    if constants.IDISK_MODE in params:
      disk.mode = params.get(constants.IDISK_MODE)
      changes.append(("disk.mode/%d" % idx, disk.mode))

    if constants.IDISK_NAME in params:
      disk.name = params.get(constants.IDISK_NAME)
      changes.append(("disk.name/%d" % idx, disk.name))

    # Modify arbitrary params in case instance template is ext
    for key, value in params.iteritems():
      if (key not in constants.MODIFIABLE_IDISK_PARAMS and
          self.instance.disk_template == constants.DT_EXT):
        # stolen from GetUpdatedParams: default means reset/delete
        if value.lower() == constants.VALUE_DEFAULT:
          try:
            del disk.params[key]
          except KeyError:
            pass
        else:
          disk.params[key] = value
        changes.append(("disk.params:%s/%d" % (key, idx), value))

    return changes

  def _RemoveDisk(self, idx, root, _):
    """Removes a disk.

    """
    hotmsg = ""
    if self.op.hotplug:
      hotmsg = self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
                                   constants.HOTPLUG_TARGET_DISK,
                                   root, None, idx)
      ShutdownInstanceDisks(self, self.instance, [root])

    (anno_disk,) = AnnotateDiskParams(self.instance, [root], self.cfg)
    for node_uuid, disk in anno_disk.ComputeNodeTree(
                             self.instance.primary_node):
      msg = self.rpc.call_blockdev_remove(node_uuid, (disk, self.instance)) \
              .fail_msg
      if msg:
        self.LogWarning("Could not remove disk/%d on node '%s': %s,"
                        " continuing anyway", idx,
                        self.cfg.GetNodeName(node_uuid), msg)

    # if this is a DRBD disk, return its port to the pool
    if root.dev_type in constants.DTS_DRBD:
      self.cfg.AddTcpUdpPort(root.logical_id[2])

    return hotmsg

  def _CreateNewNic(self, idx, params, private):
    """Creates data structure for a new network interface.

    """
    mac = params[constants.INIC_MAC]
    ip = params.get(constants.INIC_IP, None)
    net = params.get(constants.INIC_NETWORK, None)
    name = params.get(constants.INIC_NAME, None)
    net_uuid = self.cfg.LookupNetwork(net)
    #TODO: not private.filled?? can a nic have no nicparams??
    nicparams = private.filled
    nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name,
                       nicparams=nicparams)
    nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())

    changes = [
      ("nic.%d" % idx,
       "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
       (mac, ip, private.filled[constants.NIC_MODE],
       private.filled[constants.NIC_LINK], net)),
      ]

    if self.op.hotplug:
      msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
                                constants.HOTPLUG_TARGET_NIC,
                                nobj, None, idx)
      changes.append(("nic.%d" % idx, msg))

    return (nobj, changes)

  def _ApplyNicMods(self, idx, nic, params, private):
    """Modifies a network interface.

    """
    changes = []

    for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:
      if key in params:
        changes.append(("nic.%s/%d" % (key, idx), params[key]))
        setattr(nic, key, params[key])

    new_net = params.get(constants.INIC_NETWORK, nic.network)
    new_net_uuid = self.cfg.LookupNetwork(new_net)
    if new_net_uuid != nic.network:
      changes.append(("nic.network/%d" % idx, new_net))
      nic.network = new_net_uuid

    if private.filled:
      nic.nicparams = private.filled

      for (key, val) in nic.nicparams.items():
        changes.append(("nic.%s/%d" % (key, idx), val))

    if self.op.hotplug:
      msg = self._HotplugDevice(constants.HOTPLUG_ACTION_MODIFY,
                                constants.HOTPLUG_TARGET_NIC,
                                nic, None, idx)
      changes.append(("nic/%d" % idx, msg))

    return changes

  def _RemoveNic(self, idx, nic, _):
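    """Removes a network interface.

    If hotplugging is requested, the NIC is hot-unplugged from the running
    instance first.

    """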
    if self.op.hotplug:
      return self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
                                 constants.HOTPLUG_TARGET_NIC,
                                 nic, None, idx)

  def Exec(self, feedback_fn):
    """Modifies an instance.

    All parameters take effect only at the next restart of the instance.

    """
    # Process here the warnings from CheckPrereq, as we don't have a
    # feedback_fn there.
    # TODO: Replace with self.LogWarning
    for warn in self.warn:
      feedback_fn("WARNING: %s" % warn)

    assert ((self.op.disk_template is None) ^
            bool(self.owned_locks(locking.LEVEL_NODE_RES))), \
      "Not owning any node resource locks"

    result = []
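    # Entries appended to "result" are (parameter, new value) pairs making up
    # the change list reported back to the caller, e.g.
    # ("runtime_memory", 1024) -- the value here is illustrative only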

    # New primary node
    if self.op.pnode_uuid:
      self.instance.primary_node = self.op.pnode_uuid

    # runtime memory
    if self.op.runtime_mem:
      rpcres = self.rpc.call_instance_balloon_memory(self.instance.primary_node,
                                                     self.instance,
                                                     self.op.runtime_mem)
      rpcres.Raise("Cannot modify instance runtime memory")
      result.append(("runtime_memory", self.op.runtime_mem))

    # Apply disk changes
    _ApplyContainerMods("disk", self.instance.disks, result, self.diskmod,
                        self._CreateNewDisk, self._ModifyDisk,
                        self._RemoveDisk, post_add_fn=self._PostAddDisk)
    _UpdateIvNames(0, self.instance.disks)

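    # Disk template conversion: the instance's disks are shut down first and
    # the conversion helper registered in _DISK_CONVERSIONS is then invoked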
    if self.op.disk_template:
      if __debug__:
        check_nodes = set(self.instance.all_nodes)
        if self.op.remote_node_uuid:
          check_nodes.add(self.op.remote_node_uuid)
        for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
          owned = self.owned_locks(level)
          assert not (check_nodes - owned), \
            ("Not owning the correct locks, owning %r, expected at least %r" %
             (owned, check_nodes))

      r_shut = ShutdownInstanceDisks(self, self.instance)
      if not r_shut:
        raise errors.OpExecError("Cannot shutdown instance disks, unable to"
                                 " proceed with disk template conversion")
      mode = (self.instance.disk_template, self.op.disk_template)
      try:
        self._DISK_CONVERSIONS[mode](self, feedback_fn)
      except:
        self.cfg.ReleaseDRBDMinors(self.instance.uuid)
        raise
      result.append(("disk_template", self.op.disk_template))

      assert self.instance.disk_template == self.op.disk_template, \
        ("Expected disk template '%s', found '%s'" %
         (self.op.disk_template, self.instance.disk_template))

    # Release node and resource locks if there are any (they might already have
    # been released during disk conversion)
    ReleaseLocks(self, locking.LEVEL_NODE)
    ReleaseLocks(self, locking.LEVEL_NODE_RES)

    # Apply NIC changes
    if self._new_nics is not None:
      self.instance.nics = self._new_nics
      result.extend(self._nic_chgdesc)

    # hvparams changes
    if self.op.hvparams:
      self.instance.hvparams = self.hv_inst
      for key, val in self.op.hvparams.iteritems():
        result.append(("hv/%s" % key, val))

    # beparams changes
    if self.op.beparams:
      self.instance.beparams = self.be_inst
      for key, val in self.op.beparams.iteritems():
        result.append(("be/%s" % key, val))

    # OS change
    if self.op.os_name:
      self.instance.os = self.op.os_name

    # osparams changes
    if self.op.osparams:
      self.instance.osparams = self.os_inst
      for key, val in self.op.osparams.iteritems():
        result.append(("os/%s" % key, val))

    if self.op.osparams_private:
      self.instance.osparams_private = self.os_inst_private
      for key, val in self.op.osparams_private.iteritems():
        # Show the Private(...) blurb.
        result.append(("os_private/%s" % key, repr(val)))

    if self.op.offline is None:
      # Ignore
      pass
    elif self.op.offline:
      # Mark instance as offline
      self.cfg.MarkInstanceOffline(self.instance.uuid)
      result.append(("admin_state", constants.ADMINST_OFFLINE))
    else:
      # Mark instance as online, but stopped
      self.cfg.MarkInstanceDown(self.instance.uuid)
      result.append(("admin_state", constants.ADMINST_DOWN))

    self.cfg.Update(self.instance, feedback_fn, self.proc.GetECId())

    assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
                self.owned_locks(locking.LEVEL_NODE)), \
      "All node locks should have been released by now"

    return result

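  # Supported disk template conversions: maps a (current template, requested
  # template) pair to the helper that performs the conversion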
  _DISK_CONVERSIONS = {
    (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
    (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
    }


class LUInstanceChangeGroup(LogicalUnit):
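  """Moves an instance to a different node group.

  """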
  HPATH = "instance-change-group"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self.share_locks = ShareAll()

    self.needed_locks = {
      locking.LEVEL_NODEGROUP: [],
      locking.LEVEL_NODE: [],
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
      }

    self._ExpandAndLockInstance()

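    # Resolve the requested target group names to UUIDs; if no groups were
    # given, the potential targets are determined later in CheckPrereq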
    if self.op.target_groups:
      self.req_target_uuids = map(self.cfg.LookupNodeGroup,
                                  self.op.target_groups)
    else:
      self.req_target_uuids = None

    self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]

      if self.req_target_uuids:
        lock_groups = set(self.req_target_uuids)

        # Lock all groups used by instance optimistically; this requires going
        # via the node before it's locked, requiring verification later on
        instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
        lock_groups.update(instance_groups)
      else:
        # No target groups, need to lock all of them
        lock_groups = locking.ALL_SET

      self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups

    elif level == locking.LEVEL_NODE:
      if self.req_target_uuids:
        # Lock all nodes used by instances
        self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
        self._LockInstancesNodes()

        # Lock all nodes in all potential target groups
        lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
                       self.cfg.GetInstanceNodeGroups(self.op.instance_uuid))
        member_nodes = [node_uuid
                        for group in lock_groups
                        for node_uuid in self.cfg.GetNodeGroup(group).members]
        self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
      else:
        # Lock all nodes as all groups are potential targets
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    owned_instance_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))

    assert (self.req_target_uuids is None or
            owned_groups.issuperset(self.req_target_uuids))
    assert owned_instance_names == set([self.op.instance_name])

    # Get instance information
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)

    # Check if node groups for locked instance are still correct
    assert owned_nodes.issuperset(self.instance.all_nodes), \
      ("Instance %s's nodes changed while we kept the lock" %
       self.op.instance_name)

    inst_groups = CheckInstanceNodeGroups(self.cfg, self.op.instance_uuid,
                                          owned_groups)

    if self.req_target_uuids:
      # User requested specific target groups
      self.target_uuids = frozenset(self.req_target_uuids)
    else:
      # All groups except those used by the instance are potential targets
      self.target_uuids = owned_groups - inst_groups

    conflicting_groups = self.target_uuids & inst_groups
    if conflicting_groups:
      raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
                                 " used by the instance '%s'" %
                                 (utils.CommaJoin(conflicting_groups),
                                  self.op.instance_name),
                                 errors.ECODE_INVAL)

    if not self.target_uuids:
      raise errors.OpPrereqError("There are no possible target groups",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    assert self.target_uuids

    env = {
      "TARGET_GROUPS": " ".join(self.target_uuids),
      }

    env.update(BuildInstanceHookEnvByObject(self, self.instance))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()
    return ([mn], [mn])

  def Exec(self, feedback_fn):
    instances = list(self.owned_locks(locking.LEVEL_INSTANCE))

    assert instances == [self.op.instance_name], "Instance not locked"

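    # Ask the instance allocator to compute a plan that moves the instance's
    # nodes into one of the target groups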
    req = iallocator.IAReqGroupChange(instances=instances,
                                      target_groups=list(self.target_uuids))
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute solution for changing group of"
                                 " instance '%s' using iallocator '%s': %s" %
                                 (self.op.instance_name, self.op.iallocator,
                                  ial.info), errors.ECODE_NORES)

    jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False)

    self.LogInfo("Iallocator returned %s job(s) for changing group of"
                 " instance '%s'", len(jobs), self.op.instance_name)

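    # The jobs are not executed here; returning them via ResultWithJobs hands
    # them over to the job queue, which performs the actual moves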
    return ResultWithJobs(jobs)