#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Logical units dealing with instances."""

import OpenSSL
import copy
import logging
import os

from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import ht
from ganeti import hypervisor
from ganeti import locking
from ganeti.masterd import iallocator
from ganeti import masterd
from ganeti import netutils
from ganeti import objects
from ganeti import pathutils
from ganeti import serializer
from ganeti import ssh
import ganeti.rpc.node as rpc
from ganeti import utils

from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs

from ganeti.cmdlib.common import INSTANCE_DOWN, \
  INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, CheckNodeOnline, \
  ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
  LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
  IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, CheckOSImage, \
  AnnotateDiskParams, GetUpdatedParams, ExpandInstanceUuidAndName, \
  ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeUuidAndName, \
  CheckDiskTemplateEnabled, IsValidDiskAccessModeCombination
from ganeti.cmdlib.instance_storage import CreateDisks, \
  CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, ImageDisks, \
  WaitForSync, IsExclusiveStorageEnabledNodeUuid, CreateSingleBlockDev, \
  ComputeDisks, CheckRADOSFreeSpace, ComputeDiskSizePerVG, \
  GenerateDiskTemplate, StartInstanceDisks, ShutdownInstanceDisks, \
  AssembleInstanceDisks, CheckSpindlesExclusiveStorage
from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
  GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
  NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
  ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
  GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
  CheckInstanceBridgesExist, CheckNicsBridgesExist, CheckNodeHasOS

import ganeti.masterd.instance


#: Type description for changes as returned by L{_ApplyContainerMods}'s
#: callbacks
_TApplyContModsCbChanges = \
  ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
    ht.TNonEmptyString,
    ht.TAny,
    ])))


def _CheckHostnameSane(lu, name):
  """Ensures that a given hostname resolves to a 'sane' name.

  The given name is required to be a prefix of the resolved hostname,
  to prevent accidental mismatches.

  @param lu: the logical unit on behalf of which we're checking
  @param name: the name we should resolve and check
  @return: the resolved hostname object

  """
  hostname = netutils.GetHostname(name=name)
  if hostname.name != name:
    lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name)
  if not utils.MatchNameComponent(name, [hostname.name]):
    raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
                                " same as given hostname '%s'") %
                               (hostname.name, name), errors.ECODE_INVAL)
  return hostname
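
# For example, a request for a (hypothetical) name "inst1" that resolves to
# "inst1.example.com" is accepted, since the given name is a prefix of the
# resolved hostname; a resolution to "other.example.com" raises OpPrereqError.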


def _CheckOpportunisticLocking(op):
  """Generate error if opportunistic locking is not possible.

  """
  if op.opportunistic_locking and not op.iallocator:
    raise errors.OpPrereqError("Opportunistic locking is only available in"
                               " combination with an instance allocator",
                               errors.ECODE_INVAL)


def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_name_whitelist):
  """Wrapper around IAReqInstanceAlloc.

  @param op: The instance opcode
  @param disks: The computed disks
  @param nics: The computed nics
  @param beparams: The full filled beparams
  @param node_name_whitelist: List of nodes which should appear as online to the
    allocator (unless the node is already marked offline)

  @returns: A filled L{iallocator.IAReqInstanceAlloc}

  """
  spindle_use = beparams[constants.BE_SPINDLE_USE]
  return iallocator.IAReqInstanceAlloc(name=op.instance_name,
                                       disk_template=op.disk_template,
                                       tags=op.tags,
                                       os=op.os_type,
                                       vcpus=beparams[constants.BE_VCPUS],
                                       memory=beparams[constants.BE_MAXMEM],
                                       spindle_use=spindle_use,
                                       disks=disks,
                                       nics=[n.ToDict() for n in nics],
                                       hypervisor=op.hypervisor,
                                       node_whitelist=node_name_whitelist)


def _ComputeFullBeParams(op, cluster):
  """Computes the full beparams.

  @param op: The instance opcode
  @param cluster: The cluster config object

  @return: The fully filled beparams

  """
  default_beparams = cluster.beparams[constants.PP_DEFAULT]
  for param, value in op.beparams.iteritems():
    if value == constants.VALUE_AUTO:
      op.beparams[param] = default_beparams[param]
  objects.UpgradeBeParams(op.beparams)
  utils.ForceDictType(op.beparams, constants.BES_PARAMETER_TYPES)
  return cluster.SimpleFillBE(op.beparams)


def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
  """Computes the nics.

  @param op: The instance opcode
  @param cluster: Cluster configuration object
  @param default_ip: The default ip to assign
  @param cfg: An instance of the configuration object
  @param ec_id: Execution context ID

  @returns: The built up nics

  """
  nics = []
  for nic in op.nics:
    nic_mode_req = nic.get(constants.INIC_MODE, None)
    nic_mode = nic_mode_req
    if nic_mode is None or nic_mode == constants.VALUE_AUTO:
      nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]

    net = nic.get(constants.INIC_NETWORK, None)
    link = nic.get(constants.NIC_LINK, None)
    ip = nic.get(constants.INIC_IP, None)
    vlan = nic.get(constants.INIC_VLAN, None)

    if net is None or net.lower() == constants.VALUE_NONE:
      net = None
    else:
      if nic_mode_req is not None or link is not None:
        raise errors.OpPrereqError("If network is given, no mode or link"
                                   " is allowed to be passed",
                                   errors.ECODE_INVAL)

    # ip validity checks
    if ip is None or ip.lower() == constants.VALUE_NONE:
      nic_ip = None
    elif ip.lower() == constants.VALUE_AUTO:
      if not op.name_check:
        raise errors.OpPrereqError("IP address set to auto but name checks"
                                   " have been skipped",
                                   errors.ECODE_INVAL)
      nic_ip = default_ip
    else:
      # We defer pool operations until later, so that the iallocator has
      # filled in the instance's node(s)
      if ip.lower() == constants.NIC_IP_POOL:
        if net is None:
          raise errors.OpPrereqError("if ip=pool, parameter network"
                                     " must be passed too",
                                     errors.ECODE_INVAL)

      elif not netutils.IPAddress.IsValid(ip):
        raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                   errors.ECODE_INVAL)

      nic_ip = ip

    # TODO: check the ip address for uniqueness
    if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
      raise errors.OpPrereqError("Routed nic mode requires an ip address",
                                 errors.ECODE_INVAL)

    # MAC address verification
    mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
    if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
      mac = utils.NormalizeAndValidateMac(mac)

      try:
        # TODO: We need to factor this out
        cfg.ReserveMAC(mac, ec_id)
      except errors.ReservationError:
        raise errors.OpPrereqError("MAC address %s already in use"
                                   " in cluster" % mac,
                                   errors.ECODE_NOTUNIQUE)

    #  Build nic parameters
    nicparams = {}
    if nic_mode_req:
      nicparams[constants.NIC_MODE] = nic_mode
    if link:
      nicparams[constants.NIC_LINK] = link
    if vlan:
      nicparams[constants.NIC_VLAN] = vlan

    check_params = cluster.SimpleFillNIC(nicparams)
    objects.NIC.CheckParameterSyntax(check_params)
    net_uuid = cfg.LookupNetwork(net)
    name = nic.get(constants.INIC_NAME, None)
    if name is not None and name.lower() == constants.VALUE_NONE:
      name = None
    nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name,
                          network=net_uuid, nicparams=nicparams)
    nic_obj.uuid = cfg.GenerateUniqueID(ec_id)
    nics.append(nic_obj)

  return nics
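
# Accepted INIC_IP values above: "none" (no address), "auto" (use the address
# resolved from the instance name, requires name_check), "pool" (allocate
# later from the NIC's network, once the primary node is known) or a literal,
# valid IP address.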


def _CheckForConflictingIp(lu, ip, node_uuid):
  """In case of conflicting IP address raise error.

  @type ip: string
  @param ip: IP address
  @type node_uuid: string
  @param node_uuid: node UUID

  """
  (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node_uuid)
  if conf_net is not None:
    raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
                                " network %s, but the target NIC does not." %
                                (ip, conf_net)),
                               errors.ECODE_STATE)

  return (None, None)


def _ComputeIPolicyInstanceSpecViolation(
  ipolicy, instance_spec, disk_template,
  _compute_fn=ComputeIPolicySpecViolation):
  """Compute if instance specs meet the specs of ipolicy.

  @type ipolicy: dict
  @param ipolicy: The ipolicy to verify against
  @type instance_spec: dict
  @param instance_spec: The instance spec to verify
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{ComputeIPolicySpecViolation}

  """
  mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
  cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
  disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
  disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
  nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
  spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)

  return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
                     disk_sizes, spindle_use, disk_template)


def _ComputeInstanceCommunicationNIC(instance_name):
  """Compute the name of the instance NIC used by instance
  communication.

  With instance communication, a new NIC is added to the instance.
  This NIC has a special name that identifies it as being part of
  instance communication, and not just a normal NIC.  This function
  generates the name of the NIC based on a prefix and the instance
  name.

  @type instance_name: string
  @param instance_name: name of the instance the NIC belongs to

  @rtype: string
  @return: name of the NIC

  """
  return constants.INSTANCE_COMMUNICATION_NIC_PREFIX + instance_name
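
# E.g. for a (hypothetical) instance named "inst1.example.com" the
# communication NIC is simply named
# "<INSTANCE_COMMUNICATION_NIC_PREFIX>inst1.example.com".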


class LUInstanceCreate(LogicalUnit):
  """Create an instance.

  """
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def _CheckDiskTemplateValid(self):
    """Checks validity of disk template.

    """
    cluster = self.cfg.GetClusterInfo()
    if self.op.disk_template is None:
      # FIXME: It would be better to take the default disk template from the
      # ipolicy, but for the ipolicy we need the primary node, which we get from
      # the iallocator, which wants the disk template as input. To solve this
      # chicken-and-egg problem, it should be possible to specify just a node
      # group from the iallocator and take the ipolicy from that.
      self.op.disk_template = cluster.enabled_disk_templates[0]
    CheckDiskTemplateEnabled(cluster, self.op.disk_template)

  def _CheckDiskArguments(self):
    """Checks validity of disk-related arguments.

    """
    # check that disks' names are unique and valid
    utils.ValidateDeviceNames("disk", self.op.disks)

    self._CheckDiskTemplateValid()

    # check disks' parameter names and consistent adopt/no-adopt strategy
    has_adopt = has_no_adopt = False
    for disk in self.op.disks:
      if self.op.disk_template != constants.DT_EXT:
        utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
      if constants.IDISK_ADOPT in disk:
        has_adopt = True
      else:
        has_no_adopt = True
    if has_adopt and has_no_adopt:
      raise errors.OpPrereqError("Either all disks are adopted or none is",
                                 errors.ECODE_INVAL)
    if has_adopt:
      if self.op.disk_template not in constants.DTS_MAY_ADOPT:
        raise errors.OpPrereqError("Disk adoption is not supported for the"
                                   " '%s' disk template" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)
      if self.op.iallocator is not None:
        raise errors.OpPrereqError("Disk adoption not allowed with an"
                                   " iallocator script", errors.ECODE_INVAL)
      if self.op.mode == constants.INSTANCE_IMPORT:
        raise errors.OpPrereqError("Disk adoption not allowed for"
                                   " instance import", errors.ECODE_INVAL)
    else:
      if self.op.disk_template in constants.DTS_MUST_ADOPT:
        raise errors.OpPrereqError("Disk template %s requires disk adoption,"
                                   " but no 'adopt' parameter given" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)

    self.adopt_disks = has_adopt

  def _CheckVLANArguments(self):
    """ Check validity of VLANs if given

    """
    for nic in self.op.nics:
      vlan = nic.get(constants.INIC_VLAN, None)
      if vlan:
        if vlan[0] == ".":
          # vlan starting with dot means single untagged vlan,
          # might be followed by trunk (:)
          if not vlan[1:].isdigit():
            vlanlist = vlan[1:].split(':')
            for vl in vlanlist:
              if not vl.isdigit():
                raise errors.OpPrereqError("Specified VLAN parameter is "
                                           "invalid : %s" % vlan,
                                           errors.ECODE_INVAL)
        elif vlan[0] == ":":
          # Trunk - tagged only
          vlanlist = vlan[1:].split(':')
          for vl in vlanlist:
            if not vl.isdigit():
              raise errors.OpPrereqError("Specified VLAN parameter is invalid"
                                         " : %s" % vlan, errors.ECODE_INVAL)
        elif vlan.isdigit():
          # This is the simplest case. No dots, only single digit
          # -> Create untagged access port, dot needs to be added
          nic[constants.INIC_VLAN] = "." + vlan
        else:
          raise errors.OpPrereqError("Specified VLAN parameter is invalid"
                                     " : %s" % vlan, errors.ECODE_INVAL)

  def CheckArguments(self):
    """Check arguments.

    """
    # do not require name_check to ease forward/backward compatibility
    # for tools
    if self.op.no_install and self.op.start:
      self.LogInfo("No-installation mode selected, disabling startup")
      self.op.start = False
    # validate/normalize the instance name
    self.op.instance_name = \
      netutils.Hostname.GetNormalizedName(self.op.instance_name)

    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("Cannot do IP address check without a name"
                                 " check", errors.ECODE_INVAL)

    # add NIC for instance communication
    if self.op.instance_communication:
      nic_name = _ComputeInstanceCommunicationNIC(self.op.instance_name)

      self.op.nics.append({constants.INIC_NAME: nic_name,
                           constants.INIC_MAC: constants.VALUE_GENERATE,
                           constants.INIC_IP: constants.NIC_IP_POOL,
                           constants.INIC_NETWORK:
                             self.cfg.GetInstanceCommunicationNetwork()})

    # check nics' parameter names
    for nic in self.op.nics:
      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
    # check that NICs' parameter names are unique and valid
    utils.ValidateDeviceNames("NIC", self.op.nics)

    self._CheckVLANArguments()

    self._CheckDiskArguments()
    assert self.op.disk_template is not None

    # instance name verification
    if self.op.name_check:
      self.hostname = _CheckHostnameSane(self, self.op.instance_name)
      self.op.instance_name = self.hostname.name
      # used in CheckPrereq for ip ping check
      self.check_ip = self.hostname.ip
    else:
      self.check_ip = None

    # file storage checks
    if (self.op.file_driver and
        not self.op.file_driver in constants.FILE_DRIVER):
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                 self.op.file_driver, errors.ECODE_INVAL)

    # set default file_driver if unset and required
    if (not self.op.file_driver and
        self.op.disk_template in constants.DTS_FILEBASED):
      self.op.file_driver = constants.FD_LOOP

    ### Node/iallocator related checks
    CheckIAllocatorOrNode(self, "iallocator", "pnode")

    if self.op.pnode is not None:
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        if self.op.snode is None:
          raise errors.OpPrereqError("The networked disk templates need"
                                     " a mirror node", errors.ECODE_INVAL)
      elif self.op.snode:
        self.LogWarning("Secondary node will be ignored on non-mirrored disk"
                        " template")
        self.op.snode = None

    _CheckOpportunisticLocking(self.op)

    if self.op.mode == constants.INSTANCE_IMPORT:
      # On import force_variant must be True, because if we forced it at
      # initial install, our only chance when importing it back is that it
      # works again!
      self.op.force_variant = True

      if self.op.no_install:
        self.LogInfo("No-installation mode has no effect during import")

      if objects.GetOSImage(self.op.osparams):
        self.LogInfo("OS image has no effect during import")
    elif self.op.mode == constants.INSTANCE_CREATE:
      os_image = CheckOSImage(self.op)

      if self.op.os_type is None and os_image is None:
        raise errors.OpPrereqError("No guest OS or OS image specified",
                                   errors.ECODE_INVAL)

      if self.op.os_type is not None \
            and self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
        raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
                                   " installation" % self.op.os_type,
                                   errors.ECODE_STATE)
    elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
      if objects.GetOSImage(self.op.osparams):
        self.LogInfo("OS image has no effect during import")

      self._cds = GetClusterDomainSecret()

      # Check handshake to ensure both clusters have the same domain secret
      src_handshake = self.op.source_handshake
      if not src_handshake:
        raise errors.OpPrereqError("Missing source handshake",
                                   errors.ECODE_INVAL)

      errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
                                                           src_handshake)
      if errmsg:
        raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
                                   errors.ECODE_INVAL)

      # Load and check source CA
      self.source_x509_ca_pem = self.op.source_x509_ca
      if not self.source_x509_ca_pem:
        raise errors.OpPrereqError("Missing source X509 CA",
                                   errors.ECODE_INVAL)

      try:
        (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
                                                    self._cds)
      except OpenSSL.crypto.Error, err:
        raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
                                   (err, ), errors.ECODE_INVAL)

      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
      if errcode is not None:
        raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
                                   errors.ECODE_INVAL)

      self.source_x509_ca = cert

      src_instance_name = self.op.source_instance_name
      if not src_instance_name:
        raise errors.OpPrereqError("Missing source instance name",
                                   errors.ECODE_INVAL)

      self.source_instance_name = \
        netutils.GetHostname(name=src_instance_name).name

    else:
      raise errors.OpPrereqError("Invalid instance creation mode %r" %
                                 self.op.mode, errors.ECODE_INVAL)

  def ExpandNames(self):
    """ExpandNames for CreateInstance.

    Figure out the right locks for instance creation.

    """
    self.needed_locks = {}

    # this is just a preventive check, but someone might still add this
    # instance in the meantime, and creation will fail at lock-add time
    if self.op.instance_name in\
      [inst.name for inst in self.cfg.GetAllInstancesInfo().values()]:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 self.op.instance_name, errors.ECODE_EXISTS)

    self.add_locks[locking.LEVEL_INSTANCE] = self.op.instance_name

    if self.op.iallocator:
      # TODO: Find a solution to not lock all nodes in the cluster, e.g. by
      # specifying a group on instance creation and then selecting nodes from
      # that group
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
      self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET

      if self.op.opportunistic_locking:
        self.opportunistic_locks[locking.LEVEL_NODE] = True
    else:
      (self.op.pnode_uuid, self.op.pnode) = \
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
      nodelist = [self.op.pnode_uuid]
      if self.op.snode is not None:
        (self.op.snode_uuid, self.op.snode) = \
          ExpandNodeUuidAndName(self.cfg, self.op.snode_uuid, self.op.snode)
        nodelist.append(self.op.snode_uuid)
      self.needed_locks[locking.LEVEL_NODE] = nodelist

    # in case of import lock the source node too
    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = self.op.src_node
      src_path = self.op.src_path

      if src_path is None:
        self.op.src_path = src_path = self.op.instance_name

      if src_node is None:
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
        self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
        self.op.src_node = None
        if os.path.isabs(src_path):
          raise errors.OpPrereqError("Importing an instance from a path"
                                     " requires a source node option",
                                     errors.ECODE_INVAL)
      else:
        (self.op.src_node_uuid, self.op.src_node) = (_, src_node) = \
          ExpandNodeUuidAndName(self.cfg, self.op.src_node_uuid, src_node)
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
          self.needed_locks[locking.LEVEL_NODE].append(self.op.src_node_uuid)
        if not os.path.isabs(src_path):
          self.op.src_path = \
            utils.PathJoin(pathutils.EXPORT_DIR, src_path)

    self.needed_locks[locking.LEVEL_NODE_RES] = \
      CopyLockList(self.needed_locks[locking.LEVEL_NODE])

    # Optimistically acquire shared group locks (we're reading the
    # configuration).  We can't just call GetInstanceNodeGroups, because the
    # instance doesn't exist yet. Therefore we lock all node groups of all
    # nodes we have.
    if self.needed_locks[locking.LEVEL_NODE] == locking.ALL_SET:
      # In the case we lock all nodes for opportunistic allocation, we have no
      # choice than to lock all groups, because they're allocated before nodes.
      # This is sad, but true. At least we release all those we don't need in
      # CheckPrereq later.
      self.needed_locks[locking.LEVEL_NODEGROUP] = locking.ALL_SET
    else:
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
        list(self.cfg.GetNodeGroupsFromNodes(
          self.needed_locks[locking.LEVEL_NODE]))
    self.share_locks[locking.LEVEL_NODEGROUP] = 1

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE_RES and \
      self.opportunistic_locks[locking.LEVEL_NODE]:
      # Even when using opportunistic locking, we require the same set of
      # NODE_RES locks as we got NODE locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        self.owned_locks(locking.LEVEL_NODE)

  def _RunAllocator(self):
    """Run the allocator based on input opcode.

    """
    if self.op.opportunistic_locking:
      # Only consider nodes for which a lock is held
      node_name_whitelist = self.cfg.GetNodeNames(
        self.owned_locks(locking.LEVEL_NODE))
    else:
      node_name_whitelist = None

    req = _CreateInstanceAllocRequest(self.op, self.disks,
                                      self.nics, self.be_full,
                                      node_name_whitelist)
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      # When opportunistic locks are used only a temporary failure is generated
      if self.op.opportunistic_locking:
        ecode = errors.ECODE_TEMP_NORES
      else:
        ecode = errors.ECODE_NORES

      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" %
                                 (self.op.iallocator, ial.info),
                                 ecode)

    (self.op.pnode_uuid, self.op.pnode) = \
      ExpandNodeUuidAndName(self.cfg, None, ial.result[0])
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                 self.op.instance_name, self.op.iallocator,
                 utils.CommaJoin(ial.result))

    assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator"

    if req.RequiredNodes() == 2:
      (self.op.snode_uuid, self.op.snode) = \
        ExpandNodeUuidAndName(self.cfg, None, ial.result[1])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "ADD_MODE": self.op.mode,
      }
    if self.op.mode == constants.INSTANCE_IMPORT:
      env["SRC_NODE"] = self.op.src_node
      env["SRC_PATH"] = self.op.src_path
      env["SRC_IMAGES"] = self.src_images

    env.update(BuildInstanceHookEnv(
      name=self.op.instance_name,
      primary_node_name=self.op.pnode,
      secondary_node_names=self.cfg.GetNodeNames(self.secondaries),
      status=self.op.start,
      os_type=self.op.os_type,
      minmem=self.be_full[constants.BE_MINMEM],
      maxmem=self.be_full[constants.BE_MAXMEM],
      vcpus=self.be_full[constants.BE_VCPUS],
      nics=NICListToTuple(self, self.nics),
      disk_template=self.op.disk_template,
      disks=[(d[constants.IDISK_NAME], d.get("uuid", ""),
              d[constants.IDISK_SIZE], d[constants.IDISK_MODE])
             for d in self.disks],
      bep=self.be_full,
      hvp=self.hv_full,
      hypervisor_name=self.op.hypervisor,
      tags=self.op.tags,
      ))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode(), self.op.pnode_uuid] + self.secondaries
    return nl, nl

  def _ReadExportInfo(self):
    """Reads the export information from disk.

    It will override the opcode source node and path with the actual
    information, if these two were not specified before.

    @return: the export information

    """
    assert self.op.mode == constants.INSTANCE_IMPORT

    if self.op.src_node_uuid is None:
      locked_nodes = self.owned_locks(locking.LEVEL_NODE)
      exp_list = self.rpc.call_export_list(locked_nodes)
      found = False
      for node_uuid in exp_list:
        if exp_list[node_uuid].fail_msg:
          continue
        if self.op.src_path in exp_list[node_uuid].payload:
          found = True
          self.op.src_node = self.cfg.GetNodeInfo(node_uuid).name
          self.op.src_node_uuid = node_uuid
          self.op.src_path = utils.PathJoin(pathutils.EXPORT_DIR,
                                            self.op.src_path)
          break
      if not found:
        raise errors.OpPrereqError("No export found for relative path %s" %
                                   self.op.src_path, errors.ECODE_INVAL)

    CheckNodeOnline(self, self.op.src_node_uuid)
    result = self.rpc.call_export_info(self.op.src_node_uuid, self.op.src_path)
    result.Raise("No export or invalid export found in dir %s" %
                 self.op.src_path)

    export_info = objects.SerializableConfigParser.Loads(str(result.payload))
    if not export_info.has_section(constants.INISECT_EXP):
      raise errors.ProgrammerError("Corrupted export config",
                                   errors.ECODE_ENVIRON)

    ei_version = export_info.get(constants.INISECT_EXP, "version")
    if int(ei_version) != constants.EXPORT_VERSION:
      raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                 (ei_version, constants.EXPORT_VERSION),
                                 errors.ECODE_ENVIRON)
    return export_info

  def _ReadExportParams(self, einfo):
    """Use export parameters as defaults.

    In case the opcode doesn't specify (as in override) some instance
    parameters, then try to use them from the export information, if
    that declares them.

    """
    self.op.os_type = einfo.get(constants.INISECT_EXP, "os")

    if not self.op.disks:
      disks = []
      # TODO: import the disk iv_name too
      for idx in range(constants.MAX_DISKS):
        if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
          disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
          disk_name = einfo.get(constants.INISECT_INS, "disk%d_name" % idx)
          disk = {
            constants.IDISK_SIZE: disk_sz,
            constants.IDISK_NAME: disk_name
            }
          disks.append(disk)
      self.op.disks = disks
      if not disks and self.op.disk_template != constants.DT_DISKLESS:
        raise errors.OpPrereqError("No disk info specified and the export"
                                   " is missing the disk information",
                                   errors.ECODE_INVAL)

    if not self.op.nics:
      nics = []
      for idx in range(constants.MAX_NICS):
        if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
          ndict = {}
          for name in [constants.INIC_IP,
                       constants.INIC_MAC, constants.INIC_NAME]:
            nic_param_name = "nic%d_%s" % (idx, name)
            if einfo.has_option(constants.INISECT_INS, nic_param_name):
              v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
              ndict[name] = v
          network = einfo.get(constants.INISECT_INS,
                              "nic%d_%s" % (idx, constants.INIC_NETWORK))
          # in case network is given link and mode are inherited
          # from nodegroup's netparams and thus should not be passed here
          if network:
            ndict[constants.INIC_NETWORK] = network
          else:
            for name in list(constants.NICS_PARAMETERS):
              v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
              ndict[name] = v
          nics.append(ndict)
        else:
          break
      self.op.nics = nics

    if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
      self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()

    if (self.op.hypervisor is None and
        einfo.has_option(constants.INISECT_INS, "hypervisor")):
      self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")

    if einfo.has_section(constants.INISECT_HYP):
      # use the export parameters but do not override the ones
      # specified by the user
      for name, value in einfo.items(constants.INISECT_HYP):
        if name not in self.op.hvparams:
          self.op.hvparams[name] = value

    if einfo.has_section(constants.INISECT_BEP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_BEP):
        if name not in self.op.beparams:
          self.op.beparams[name] = value
        # Compatibility for the old "memory" be param
        if name == constants.BE_MEMORY:
          if constants.BE_MAXMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MAXMEM] = value
          if constants.BE_MINMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MINMEM] = value
    else:
      # try to read the parameters old style, from the main section
      for name in constants.BES_PARAMETERS:
        if (name not in self.op.beparams and
            einfo.has_option(constants.INISECT_INS, name)):
          self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)

    if einfo.has_section(constants.INISECT_OSP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_OSP):
        if name not in self.op.osparams:
          self.op.osparams[name] = value

    if einfo.has_section(constants.INISECT_OSP_PRIVATE):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_OSP_PRIVATE):
        if name not in self.op.osparams_private:
          self.op.osparams_private[name] = serializer.Private(value, descr=name)

  def _RevertToDefaults(self, cluster):
    """Revert the instance parameters to the default values.

    """
    # hvparams
    hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
    for name in self.op.hvparams.keys():
      if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
        del self.op.hvparams[name]
    # beparams
    be_defs = cluster.SimpleFillBE({})
    for name in self.op.beparams.keys():
      if name in be_defs and be_defs[name] == self.op.beparams[name]:
        del self.op.beparams[name]
    # nic params
    nic_defs = cluster.SimpleFillNIC({})
    for nic in self.op.nics:
      for name in constants.NICS_PARAMETERS:
        if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
          del nic[name]
    # osparams
    os_defs = cluster.SimpleFillOS(self.op.os_type, {})
    for name in self.op.osparams.keys():
      if name in os_defs and os_defs[name] == self.op.osparams[name]:
        del self.op.osparams[name]

    os_defs_ = cluster.SimpleFillOS(self.op.os_type, {},
                                    os_params_private={})
    for name in self.op.osparams_private.keys():
      if name in os_defs_ and os_defs_[name] == self.op.osparams_private[name]:
        del self.op.osparams_private[name]

  def _CalculateFileStorageDir(self):
    """Calculate final instance file storage dir.

    """
    # file storage dir calculation/check
    self.instance_file_storage_dir = None
    if self.op.disk_template in constants.DTS_FILEBASED:
      # build the full file storage dir path
      joinargs = []

      cfg_storage = None
      if self.op.disk_template == constants.DT_FILE:
        cfg_storage = self.cfg.GetFileStorageDir()
      elif self.op.disk_template == constants.DT_SHARED_FILE:
        cfg_storage = self.cfg.GetSharedFileStorageDir()
      elif self.op.disk_template == constants.DT_GLUSTER:
        cfg_storage = self.cfg.GetGlusterStorageDir()

      if not cfg_storage:
        raise errors.OpPrereqError(
          "Cluster file storage dir for {tpl} storage type not defined".format(
            tpl=repr(self.op.disk_template)
          ),
          errors.ECODE_STATE
      )

      joinargs.append(cfg_storage)

      if self.op.file_storage_dir is not None:
        joinargs.append(self.op.file_storage_dir)

      if self.op.disk_template != constants.DT_GLUSTER:
        joinargs.append(self.op.instance_name)

      if len(joinargs) > 1:
        # pylint: disable=W0142
        self.instance_file_storage_dir = utils.PathJoin(*joinargs)
      else:
        self.instance_file_storage_dir = joinargs[0]
950
  def CheckPrereq(self): # pylint: disable=R0914
951
    """Check prerequisites.
952

953
    """
954
    # Check that the optimistically acquired groups are correct wrt the
955
    # acquired nodes
956
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
957
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
958
    cur_groups = list(self.cfg.GetNodeGroupsFromNodes(owned_nodes))
959
    if not owned_groups.issuperset(cur_groups):
960
      raise errors.OpPrereqError("New instance %s's node groups changed since"
961
                                 " locks were acquired, current groups are"
962
                                 " are '%s', owning groups '%s'; retry the"
963
                                 " operation" %
964
                                 (self.op.instance_name,
965
                                  utils.CommaJoin(cur_groups),
966
                                  utils.CommaJoin(owned_groups)),
967
                                 errors.ECODE_STATE)
968

    
969
    self._CalculateFileStorageDir()
970

    
971
    if self.op.mode == constants.INSTANCE_IMPORT:
972
      export_info = self._ReadExportInfo()
973
      self._ReadExportParams(export_info)
974
      self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
975
    else:
976
      self._old_instance_name = None
977

    
978
    if (not self.cfg.GetVGName() and
979
        self.op.disk_template not in constants.DTS_NOT_LVM):
980
      raise errors.OpPrereqError("Cluster does not support lvm-based"
981
                                 " instances", errors.ECODE_STATE)
982

    
983
    if (self.op.hypervisor is None or
984
        self.op.hypervisor == constants.VALUE_AUTO):
985
      self.op.hypervisor = self.cfg.GetHypervisorType()
986

    
987
    cluster = self.cfg.GetClusterInfo()
988
    enabled_hvs = cluster.enabled_hypervisors
989
    if self.op.hypervisor not in enabled_hvs:
990
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
991
                                 " cluster (%s)" %
992
                                 (self.op.hypervisor, ",".join(enabled_hvs)),
993
                                 errors.ECODE_STATE)
994

    
995
    # Check tag validity
996
    for tag in self.op.tags:
997
      objects.TaggableObject.ValidateTag(tag)
998

    
999
    # check hypervisor parameter syntax (locally)
1000
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
1001
    filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
1002
                                      self.op.hvparams)
1003
    hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
1004
    hv_type.CheckParameterSyntax(filled_hvp)
1005
    self.hv_full = filled_hvp
1006
    # check that we don't specify global parameters on an instance
1007
    CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
1008
                         "instance", "cluster")
1009

    
1010
    # fill and remember the beparams dict
1011
    self.be_full = _ComputeFullBeParams(self.op, cluster)
1012

    
1013
    # build os parameters
1014
    if self.op.osparams_private is None:
1015
      self.op.osparams_private = serializer.PrivateDict()
1016
    if self.op.osparams_secret is None:
1017
      self.op.osparams_secret = serializer.PrivateDict()
1018

    
1019
    self.os_full = cluster.SimpleFillOS(
1020
      self.op.os_type,
1021
      self.op.osparams,
1022
      os_params_private=self.op.osparams_private,
1023
      os_params_secret=self.op.osparams_secret
1024
    )
1025

    
1026
    # now that hvp/bep are in final format, let's reset to defaults,
1027
    # if told to do so
1028
    if self.op.identify_defaults:
1029
      self._RevertToDefaults(cluster)
1030

    
1031
    # NIC buildup
1032
    self.nics = _ComputeNics(self.op, cluster, self.check_ip, self.cfg,
1033
                             self.proc.GetECId())
1034

    
1035
    # disk checks/pre-build
1036
    default_vg = self.cfg.GetVGName()
1037
    self.disks = ComputeDisks(self.op, default_vg)
1038

    
1039
    if self.op.mode == constants.INSTANCE_IMPORT:
1040
      disk_images = []
1041
      for idx in range(len(self.disks)):
1042
        option = "disk%d_dump" % idx
1043
        if export_info.has_option(constants.INISECT_INS, option):
1044
          # FIXME: are the old os-es, disk sizes, etc. useful?
1045
          export_name = export_info.get(constants.INISECT_INS, option)
1046
          image = utils.PathJoin(self.op.src_path, export_name)
1047
          disk_images.append(image)
1048
        else:
1049
          disk_images.append(False)
1050

    
1051
      self.src_images = disk_images
1052

    
1053
      if self.op.instance_name == self._old_instance_name:
1054
        for idx, nic in enumerate(self.nics):
1055
          if nic.mac == constants.VALUE_AUTO:
1056
            nic_mac_ini = "nic%d_mac" % idx
1057
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)
1058

    
1059
    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT
1060

    
1061
    # ip ping checks (we use the same ip that was resolved in ExpandNames)
1062
    if self.op.ip_check:
1063
      if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
1064
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
1065
                                   (self.check_ip, self.op.instance_name),
1066
                                   errors.ECODE_NOTUNIQUE)
1067

    
1068
    #### mac address generation
1069
    # By generating here the mac address both the allocator and the hooks get
1070
    # the real final mac address rather than the 'auto' or 'generate' value.
1071
    # There is a race condition between the generation and the instance object
1072
    # creation, which means that we know the mac is valid now, but we're not
1073
    # sure it will be when we actually add the instance. If things go bad
1074
    # adding the instance will abort because of a duplicate mac, and the
1075
    # creation job will fail.
1076
    for nic in self.nics:
1077
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
1078
        nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId())
1079

    
1080
    #### allocator run
1081

    
1082
    if self.op.iallocator is not None:
1083
      self._RunAllocator()
1084

    
1085
    # Release all unneeded node locks
1086
    keep_locks = filter(None, [self.op.pnode_uuid, self.op.snode_uuid,
1087
                               self.op.src_node_uuid])
1088
    ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
1089
    ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
1090
    ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)
1091
    # Release all unneeded group locks
1092
    ReleaseLocks(self, locking.LEVEL_NODEGROUP,
1093
                 keep=self.cfg.GetNodeGroupsFromNodes(keep_locks))
1094

    
1095
    assert (self.owned_locks(locking.LEVEL_NODE) ==
1096
            self.owned_locks(locking.LEVEL_NODE_RES)), \
1097
      "Node locks differ from node resource locks"
1098

    
1099
    #### node related checks
1100

    
1101
    # check primary node
1102
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode_uuid)
1103
    assert self.pnode is not None, \
1104
      "Cannot retrieve locked node %s" % self.op.pnode_uuid
1105
    if pnode.offline:
1106
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
1107
                                 pnode.name, errors.ECODE_STATE)
1108
    if pnode.drained:
1109
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
1110
                                 pnode.name, errors.ECODE_STATE)
1111
    if not pnode.vm_capable:
1112
      raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
1113
                                 " '%s'" % pnode.name, errors.ECODE_STATE)
1114

    
1115
    self.secondaries = []
1116

    
1117
    # Fill in any IPs from IP pools. This must happen here, because we need to
1118
    # know the nic's primary node, as specified by the iallocator
1119
    for idx, nic in enumerate(self.nics):
1120
      net_uuid = nic.network
1121
      if net_uuid is not None:
1122
        nobj = self.cfg.GetNetwork(net_uuid)
1123
        netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.uuid)
1124
        if netparams is None:
1125
          raise errors.OpPrereqError("No netparams found for network"
1126
                                     " %s. Probably not connected to"
1127
                                     " node's %s nodegroup" %
1128
                                     (nobj.name, self.pnode.name),
1129
                                     errors.ECODE_INVAL)
1130
        self.LogInfo("NIC/%d inherits netparams %s" %
1131
                     (idx, netparams.values()))
1132
        nic.nicparams = dict(netparams)
1133
        if nic.ip is not None:
1134
          if nic.ip.lower() == constants.NIC_IP_POOL:
1135
            try:
1136
              nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId())
1137
            except errors.ReservationError:
1138
              raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
1139
                                         " from the address pool" % idx,
1140
                                         errors.ECODE_STATE)
1141
            self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name)
1142
          else:
1143
            try:
1144
              self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId(),
1145
                                 check=self.op.conflicts_check)
1146
            except errors.ReservationError:
1147
              raise errors.OpPrereqError("IP address %s already in use"
1148
                                         " or does not belong to network %s" %
1149
                                         (nic.ip, nobj.name),
1150
                                         errors.ECODE_NOTUNIQUE)
1151

    
1152
      # net is None, ip None or given
1153
      elif self.op.conflicts_check:
1154
        _CheckForConflictingIp(self, nic.ip, self.pnode.uuid)
1155

    
1156
    # mirror node verification
1157
    if self.op.disk_template in constants.DTS_INT_MIRROR:
1158
      if self.op.snode_uuid == pnode.uuid:
1159
        raise errors.OpPrereqError("The secondary node cannot be the"
1160
                                   " primary node", errors.ECODE_INVAL)
1161
      CheckNodeOnline(self, self.op.snode_uuid)
1162
      CheckNodeNotDrained(self, self.op.snode_uuid)
1163
      CheckNodeVmCapable(self, self.op.snode_uuid)
1164
      self.secondaries.append(self.op.snode_uuid)
1165

    
1166
      snode = self.cfg.GetNodeInfo(self.op.snode_uuid)
1167
      if pnode.group != snode.group:
1168
        self.LogWarning("The primary and secondary nodes are in two"
1169
                        " different node groups; the disk parameters"
1170
                        " from the first disk's node group will be"
1171
                        " used")
1172

    
1173
    nodes = [pnode]
1174
    if self.op.disk_template in constants.DTS_INT_MIRROR:
1175
      nodes.append(snode)
1176
    has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
1177
    excl_stor = compat.any(map(has_es, nodes))
1178
    if excl_stor and not self.op.disk_template in constants.DTS_EXCL_STORAGE:
1179
      raise errors.OpPrereqError("Disk template %s not supported with"
1180
                                 " exclusive storage" % self.op.disk_template,
1181
                                 errors.ECODE_STATE)
1182
    for disk in self.disks:
1183
      CheckSpindlesExclusiveStorage(disk, excl_stor, True)
1184

    
1185
    node_uuids = [pnode.uuid] + self.secondaries
1186

    
1187
    if not self.adopt_disks:
1188
      if self.op.disk_template == constants.DT_RBD:
1189
        # _CheckRADOSFreeSpace() is just a placeholder.
1190
        # Any function that checks prerequisites can be placed here.
1191
        # Check if there is enough space on the RADOS cluster.
1192
        CheckRADOSFreeSpace()
1193
      elif self.op.disk_template == constants.DT_EXT:
1194
        # FIXME: Function that checks prereqs if needed
1195
        pass
1196
      elif self.op.disk_template in constants.DTS_LVM:
1197
        # Check lv size requirements, if not adopting
1198
        req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
1199
        CheckNodesFreeDiskPerVG(self, node_uuids, req_sizes)
1200
      else:
1201
        # FIXME: add checks for other, non-adopting, non-lvm disk templates
1202
        pass
1203

    
1204
    elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
1205
      all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
1206
                                disk[constants.IDISK_ADOPT])
1207
                     for disk in self.disks])
1208
      if len(all_lvs) != len(self.disks):
1209
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
1210
                                   errors.ECODE_INVAL)
1211
      for lv_name in all_lvs:
1212
        try:
1213
          # FIXME: lv_name here is "vg/lv" need to ensure that other calls
1214
          # to ReserveLV uses the same syntax
1215
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
1216
        except errors.ReservationError:
1217
          raise errors.OpPrereqError("LV named %s used by another instance" %
1218
                                     lv_name, errors.ECODE_NOTUNIQUE)
1219

    
1220
      vg_names = self.rpc.call_vg_list([pnode.uuid])[pnode.uuid]
1221
      vg_names.Raise("Cannot get VG information from node %s" % pnode.name)
1222

    
1223
      node_lvs = self.rpc.call_lv_list([pnode.uuid],
1224
                                       vg_names.payload.keys())[pnode.uuid]
1225
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
1226
      node_lvs = node_lvs.payload
1227

    
1228
      delta = all_lvs.difference(node_lvs.keys())
1229
      if delta:
1230
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
1231
                                   utils.CommaJoin(delta),
1232
                                   errors.ECODE_INVAL)
1233
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
1234
      if online_lvs:
1235
        raise errors.OpPrereqError("Online logical volumes found, cannot"
1236
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
1237
                                   errors.ECODE_STATE)
1238
      # update the size of disk based on what is found
1239
      for dsk in self.disks:
1240
        dsk[constants.IDISK_SIZE] = \
1241
          int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
1242
                                        dsk[constants.IDISK_ADOPT])][0]))
1243

    
1244
    elif self.op.disk_template == constants.DT_BLOCK:
1245
      # Normalize and de-duplicate device paths
1246
      all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
1247
                       for disk in self.disks])
1248
      if len(all_disks) != len(self.disks):
1249
        raise errors.OpPrereqError("Duplicate disk names given for adoption",
1250
                                   errors.ECODE_INVAL)
1251
      baddisks = [d for d in all_disks
1252
                  if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
1253
      if baddisks:
1254
        raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
1255
                                   " cannot be adopted" %
1256
                                   (utils.CommaJoin(baddisks),
1257
                                    constants.ADOPTABLE_BLOCKDEV_ROOT),
1258
                                   errors.ECODE_INVAL)
1259

    
1260
      node_disks = self.rpc.call_bdev_sizes([pnode.uuid],
1261
                                            list(all_disks))[pnode.uuid]
1262
      node_disks.Raise("Cannot get block device information from node %s" %
1263
                       pnode.name)
1264
      node_disks = node_disks.payload
1265
      delta = all_disks.difference(node_disks.keys())
1266
      if delta:
1267
        raise errors.OpPrereqError("Missing block device(s): %s" %
1268
                                   utils.CommaJoin(delta),
1269
                                   errors.ECODE_INVAL)
1270
      for dsk in self.disks:
1271
        dsk[constants.IDISK_SIZE] = \
1272
          int(float(node_disks[dsk[constants.IDISK_ADOPT]]))
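      # Example of the path handling above (device names are hypothetical):
      # assuming constants.ADOPTABLE_BLOCKDEV_ROOT is "/dev/disk/", a disk
      # adopted as "/dev/disk/by-id/wwn-0x5000c500a1b2c3d4" passes the prefix
      # check while "/dev/sdb1" does not, and the adopted size is taken
      # verbatim from the call_bdev_sizes payload for the normalized path.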
1273

    
1274
    # Check disk access param to be compatible with specified hypervisor
1275
    node_info = self.cfg.GetNodeInfo(self.op.pnode_uuid)
1276
    node_group = self.cfg.GetNodeGroup(node_info.group)
1277
    disk_params = self.cfg.GetGroupDiskParams(node_group)
1278
    access_type = disk_params[self.op.disk_template].get(
1279
      constants.RBD_ACCESS, constants.DISK_KERNELSPACE
1280
    )
1281

    
1282
    if not IsValidDiskAccessModeCombination(self.op.hypervisor,
1283
                                            self.op.disk_template,
1284
                                            access_type):
1285
      raise errors.OpPrereqError("Selected hypervisor (%s) cannot be"
1286
                                 " used with %s disk access param" %
1287
                                 (self.op.hypervisor, access_type),
1288
                                  errors.ECODE_STATE)
1289

    
1290
    # Verify instance specs
1291
    spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
1292
    ispec = {
1293
      constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
1294
      constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
1295
      constants.ISPEC_DISK_COUNT: len(self.disks),
1296
      constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE]
1297
                                  for disk in self.disks],
1298
      constants.ISPEC_NIC_COUNT: len(self.nics),
1299
      constants.ISPEC_SPINDLE_USE: spindle_use,
1300
      }
1301

    
1302
    group_info = self.cfg.GetNodeGroup(pnode.group)
1303
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
1304
    res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
1305
                                               self.op.disk_template)
1306
    if not self.op.ignore_ipolicy and res:
1307
      msg = ("Instance allocation to group %s (%s) violates policy: %s" %
1308
             (pnode.group, group_info.name, utils.CommaJoin(res)))
1309
      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
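    # Illustration of the ispec built above (numbers invented): an instance
    # with 128 MiB of RAM, one VCPU and a single 1024 MiB disk yields an
    # ispec of {ISPEC_MEM_SIZE: 128, ISPEC_CPU_COUNT: 1, ISPEC_DISK_COUNT: 1,
    # ISPEC_DISK_SIZE: [1024], ISPEC_NIC_COUNT: 1, ISPEC_SPINDLE_USE: 1};
    # any value outside the group policy's bounds is reported as a violation
    # string in C{res}, and only --ignore-ipolicy suppresses the resulting
    # OpPrereqError.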
1310

    
1311
    CheckHVParams(self, node_uuids, self.op.hypervisor, self.op.hvparams)
1312

    
1313
    if self.op.os_type is not None:
1314
      CheckNodeHasOS(self, pnode.uuid, self.op.os_type, self.op.force_variant)
1315

    
1316
    # check OS parameters (remotely)
1317
    CheckOSParams(self, True, node_uuids, self.op.os_type, self.os_full)
1318

    
1319
    CheckNicsBridgesExist(self, self.nics, self.pnode.uuid)
1320

    
1321
    #TODO: _CheckExtParams (remotely)
1322
    # Check parameters for extstorage
1323

    
1324
    # memory check on primary node
1325
    #TODO(dynmem): use MINMEM for checking
1326
    if self.op.start:
1327
      hvfull = objects.FillDict(cluster.hvparams.get(self.op.hypervisor, {}),
1328
                                self.op.hvparams)
1329
      CheckNodeFreeMemory(self, self.pnode.uuid,
1330
                          "creating instance %s" % self.op.instance_name,
1331
                          self.be_full[constants.BE_MAXMEM],
1332
                          self.op.hypervisor, hvfull)
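      # FillDict layering, with assumed values: if the cluster hvparams for
      # this hypervisor are {"root_path": "/dev/xvda1", "kernel_args": "ro"}
      # and the opcode passes {"root_path": "/dev/vda1"}, C{hvfull} is the
      # cluster defaults with only root_path overridden; that merged view is
      # what the free-memory check sees.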
1333

    
1334
    self.dry_run_result = list(node_uuids)
1335

    
1336
  def _RemoveDegradedDisks(self, feedback_fn, disk_abort, instance):
1337
    """Removes degraded disks and instance.
1338

1339
    It optionally checks whether disks are degraded.  If the disks are
1340
    degraded, they are removed and the instance is also removed from
1341
    the configuration.
1342

1343
    If L{disk_abort} is True, then the disks are considered degraded
1344
    and removed, and the instance is removed from the configuration.
1345

1346
    If L{disk_abort} is False, then it first checks whether disks are
1347
    degraded and, if so, it removes the disks and the instance is
1348
    removed from the configuration.
1349

1350
    @type feedback_fn: callable
1351
    @param feedback_fn: function used to send feedback back to the caller
1352

1353
    @type disk_abort: boolean
1354
    @param disk_abort:
1355
      True if disks are degraded, False to first check if disks are
1356
      degraded
1357

1358
    @type instance: L{objects.Instance}
1359
    @param instance: instance containing the disks to check
1360

1361
    @rtype: NoneType
1362
    @return: None
1363
    @raise errors.OpExecError: if the disks are degraded
1364

1365
    """
1366
    if disk_abort:
1367
      pass
1368
    elif self.op.wait_for_sync:
1369
      disk_abort = not WaitForSync(self, instance)
1370
    elif instance.disk_template in constants.DTS_INT_MIRROR:
1371
      # make sure the disks are not degraded (still sync-ing is ok)
1372
      feedback_fn("* checking mirrors status")
1373
      disk_abort = not WaitForSync(self, instance, oneshot=True)
1374
    else:
1375
      disk_abort = False
1376

    
1377
    if disk_abort:
1378
      RemoveDisks(self, instance)
1379
      self.cfg.RemoveInstance(instance.uuid)
1380
      # Make sure the instance lock gets removed
1381
      self.remove_locks[locking.LEVEL_INSTANCE] = instance.name
1382
      raise errors.OpExecError("There are some degraded disks for"
1383
                               " this instance")
1384

    
1385
  def Exec(self, feedback_fn):
1386
    """Create and add the instance to the cluster.
1387

1388
    """
1389
    assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
1390
                self.owned_locks(locking.LEVEL_NODE)), \
1391
      "Node locks differ from node resource locks"
1392
    assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)
1393

    
1394
    ht_kind = self.op.hypervisor
1395
    if ht_kind in constants.HTS_REQ_PORT:
1396
      network_port = self.cfg.AllocatePort()
1397
    else:
1398
      network_port = None
1399

    
1400
    instance_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
1401

    
1402
    # This is ugly, but we have a chicken-and-egg problem here:
1403
    # We can only take the group disk parameters, as the instance
1404
    # has no disks yet (we are generating them right here).
1405
    nodegroup = self.cfg.GetNodeGroup(self.pnode.group)
1406
    disks = GenerateDiskTemplate(self,
1407
                                 self.op.disk_template,
1408
                                 instance_uuid, self.pnode.uuid,
1409
                                 self.secondaries,
1410
                                 self.disks,
1411
                                 self.instance_file_storage_dir,
1412
                                 self.op.file_driver,
1413
                                 0,
1414
                                 feedback_fn,
1415
                                 self.cfg.GetGroupDiskParams(nodegroup))
1416

    
1417
    if self.op.os_type is None:
1418
      os_type = ""
1419
    else:
1420
      os_type = self.op.os_type
1421

    
1422
    iobj = objects.Instance(name=self.op.instance_name,
1423
                            uuid=instance_uuid,
1424
                            os=os_type,
1425
                            primary_node=self.pnode.uuid,
1426
                            nics=self.nics, disks=disks,
1427
                            disk_template=self.op.disk_template,
1428
                            disks_active=False,
1429
                            admin_state=constants.ADMINST_DOWN,
1430
                            network_port=network_port,
1431
                            beparams=self.op.beparams,
1432
                            hvparams=self.op.hvparams,
1433
                            hypervisor=self.op.hypervisor,
1434
                            osparams=self.op.osparams,
1435
                            osparams_private=self.op.osparams_private,
1436
                            )
1437

    
1438
    if self.op.tags:
1439
      for tag in self.op.tags:
1440
        iobj.AddTag(tag)
1441

    
1442
    if self.adopt_disks:
1443
      if self.op.disk_template == constants.DT_PLAIN:
1444
        # rename LVs to the newly-generated names; we need to construct
1445
        # 'fake' LV disks with the old data, plus the new unique_id
1446
        tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
1447
        rename_to = []
1448
        for t_dsk, a_dsk in zip(tmp_disks, self.disks):
1449
          rename_to.append(t_dsk.logical_id)
1450
          t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
1451
        result = self.rpc.call_blockdev_rename(self.pnode.uuid,
1452
                                               zip(tmp_disks, rename_to))
1453
        result.Raise("Failed to rename adoped LVs")
1454
    else:
1455
      feedback_fn("* creating instance disks...")
1456
      try:
1457
        CreateDisks(self, iobj)
1458
      except errors.OpExecError:
1459
        self.LogWarning("Device creation failed")
1460
        self.cfg.ReleaseDRBDMinors(self.op.instance_name)
1461
        raise
1462

    
1463
    feedback_fn("adding instance %s to cluster config" % self.op.instance_name)
1464

    
1465
    self.cfg.AddInstance(iobj, self.proc.GetECId())
1466

    
1467
    # Declare that we don't want to remove the instance lock anymore, as we've
1468
    # added the instance to the config
1469
    del self.remove_locks[locking.LEVEL_INSTANCE]
1470

    
1471
    if self.op.mode == constants.INSTANCE_IMPORT:
1472
      # Release unused nodes
1473
      ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node_uuid])
1474
    else:
1475
      # Release all nodes
1476
      ReleaseLocks(self, locking.LEVEL_NODE)
1477

    
1478
    # Wipe disks
1479
    disk_abort = False
1480
    if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
1481
      feedback_fn("* wiping instance disks...")
1482
      try:
1483
        WipeDisks(self, iobj)
1484
      except errors.OpExecError, err:
1485
        logging.exception("Wiping disks failed")
1486
        self.LogWarning("Wiping instance disks failed (%s)", err)
1487
        disk_abort = True
1488

    
1489
    self._RemoveDegradedDisks(feedback_fn, disk_abort, iobj)
1490

    
1491
    # Image disks
1492
    os_image = objects.GetOSImage(iobj.osparams)
1493
    disk_abort = False
1494

    
1495
    if not self.adopt_disks and os_image is not None:
1496
      master = self.cfg.GetMasterNode()
1497

    
1498
      if not utils.IsUrl(os_image) and master != self.pnode.uuid:
1499
        ssh_port = self.pnode.ndparams.get(constants.ND_SSH_PORT)
1500
        srun = ssh.SshRunner(self.cfg.GetClusterName())
1501
        srun.CopyFileToNode(self.pnode.name, ssh_port, os_image)
1502

    
1503
      feedback_fn("* imaging instance disks...")
1504
      try:
1505
        ImageDisks(self, iobj, os_image)
1506
      except errors.OpExecError, err:
1507
        logging.exception("Imaging disks failed")
1508
        self.LogWarning("Imaging instance disks failed (%s)", err)
1509
        disk_abort = True
1510

    
1511
    self._RemoveDegradedDisks(feedback_fn, disk_abort, iobj)
1512

    
1513
    # instance disks are now active
1514
    iobj.disks_active = True
1515

    
1516
    # Release all node resource locks
1517
    ReleaseLocks(self, locking.LEVEL_NODE_RES)
1518

    
1519
    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
1520
      if self.op.mode == constants.INSTANCE_CREATE:
1521
        os_image = objects.GetOSImage(self.op.osparams)
1522

    
1523
        if os_image is None and not self.op.no_install:
1524
          pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
1525
                        not self.op.wait_for_sync)
1526
          if pause_sync:
1527
            feedback_fn("* pausing disk sync to install instance OS")
1528
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
1529
                                                              (iobj.disks,
1530
                                                               iobj), True)
1531
            for idx, success in enumerate(result.payload):
1532
              if not success:
1533
                logging.warn("pause-sync of instance %s for disk %d failed",
1534
                             self.op.instance_name, idx)
1535

    
1536
          feedback_fn("* running the instance OS create scripts...")
1537
          # FIXME: pass debug option from opcode to backend
1538
          os_add_result = \
1539
            self.rpc.call_instance_os_add(self.pnode.uuid,
1540
                                          (iobj, self.op.osparams_secret),
1541
                                          False,
1542
                                          self.op.debug_level)
1543
          if pause_sync:
1544
            feedback_fn("* resuming disk sync")
1545
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
1546
                                                              (iobj.disks,
1547
                                                               iobj), False)
1548
            for idx, success in enumerate(result.payload):
1549
              if not success:
1550
                logging.warn("resume-sync of instance %s for disk %d failed",
1551
                             self.op.instance_name, idx)
1552

    
1553
          os_add_result.Raise("Could not add os for instance %s"
1554
                              " on node %s" % (self.op.instance_name,
1555
                                               self.pnode.name))
1556

    
1557
      else:
1558
        if self.op.mode == constants.INSTANCE_IMPORT:
1559
          feedback_fn("* running the instance OS import scripts...")
1560

    
1561
          transfers = []
1562

    
1563
          for idx, image in enumerate(self.src_images):
1564
            if not image:
1565
              continue
1566

    
1567
            # FIXME: pass debug option from opcode to backend
1568
            dt = masterd.instance.DiskTransfer("disk/%s" % idx,
1569
                                               constants.IEIO_FILE, (image, ),
1570
                                               constants.IEIO_SCRIPT,
1571
                                               ((iobj.disks[idx], iobj), idx),
1572
                                               None)
1573
            transfers.append(dt)
1574

    
1575
          import_result = \
1576
            masterd.instance.TransferInstanceData(self, feedback_fn,
1577
                                                  self.op.src_node_uuid,
1578
                                                  self.pnode.uuid,
1579
                                                  self.pnode.secondary_ip,
1580
                                                  self.op.compress,
1581
                                                  iobj, transfers)
1582
          if not compat.all(import_result):
1583
            self.LogWarning("Some disks for instance %s on node %s were not"
1584
                            " imported successfully" % (self.op.instance_name,
1585
                                                        self.pnode.name))
1586

    
1587
          rename_from = self._old_instance_name
1588

    
1589
        elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
1590
          feedback_fn("* preparing remote import...")
1591
          # The source cluster will stop the instance before attempting to make
1592
          # a connection. In some cases stopping an instance can take a long
1593
          # time, hence the shutdown timeout is added to the connection
1594
          # timeout.
1595
          connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
1596
                             self.op.source_shutdown_timeout)
1597
          timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
1598

    
1599
          assert iobj.primary_node == self.pnode.uuid
1600
          disk_results = \
1601
            masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
1602
                                          self.source_x509_ca,
1603
                                          self._cds, self.op.compress, timeouts)
1604
          if not compat.all(disk_results):
1605
            # TODO: Should the instance still be started, even if some disks
1606
            # failed to import (valid for local imports, too)?
1607
            self.LogWarning("Some disks for instance %s on node %s were not"
1608
                            " imported successfully" % (self.op.instance_name,
1609
                                                        self.pnode.name))
1610

    
1611
          rename_from = self.source_instance_name
1612

    
1613
        else:
1614
          # also checked in the prereq part
1615
          raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
1616
                                       % self.op.mode)
1617

    
1618
        # Run rename script on newly imported instance
1619
        assert iobj.name == self.op.instance_name
1620
        feedback_fn("Running rename script for %s" % self.op.instance_name)
1621
        result = self.rpc.call_instance_run_rename(self.pnode.uuid, iobj,
1622
                                                   rename_from,
1623
                                                   self.op.debug_level)
1624
        result.Warn("Failed to run rename script for %s on node %s" %
1625
                    (self.op.instance_name, self.pnode.name), self.LogWarning)
1626

    
1627
    assert not self.owned_locks(locking.LEVEL_NODE_RES)
1628

    
1629
    if self.op.start:
1630
      iobj.admin_state = constants.ADMINST_UP
1631
      self.cfg.Update(iobj, feedback_fn)
1632
      logging.info("Starting instance %s on node %s", self.op.instance_name,
1633
                   self.pnode.name)
1634
      feedback_fn("* starting instance...")
1635
      result = self.rpc.call_instance_start(self.pnode.uuid, (iobj, None, None),
1636
                                            False, self.op.reason)
1637
      result.Raise("Could not start instance")
1638

    
1639
    return self.cfg.GetNodeNames(list(iobj.all_nodes))
1640

    
1641

    
1642
class LUInstanceRename(LogicalUnit):
1643
  """Rename an instance.
1644

1645
  """
1646
  HPATH = "instance-rename"
1647
  HTYPE = constants.HTYPE_INSTANCE
1648

    
1649
  def CheckArguments(self):
1650
    """Check arguments.
1651

1652
    """
1653
    if self.op.ip_check and not self.op.name_check:
1654
      # TODO: make the ip check more flexible and not depend on the name check
1655
      raise errors.OpPrereqError("IP address check requires a name check",
1656
                                 errors.ECODE_INVAL)
1657

    
1658
  def BuildHooksEnv(self):
1659
    """Build hooks env.
1660

1661
    This runs on master, primary and secondary nodes of the instance.
1662

1663
    """
1664
    env = BuildInstanceHookEnvByObject(self, self.instance)
1665
    env["INSTANCE_NEW_NAME"] = self.op.new_name
1666
    return env
1667

    
1668
  def BuildHooksNodes(self):
1669
    """Build hooks nodes.
1670

1671
    """
1672
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
1673
    return (nl, nl)
1674

    
1675
  def CheckPrereq(self):
1676
    """Check prerequisites.
1677

1678
    This checks that the instance is in the cluster and is not running.
1679

1680
    """
1681
    (self.op.instance_uuid, self.op.instance_name) = \
1682
      ExpandInstanceUuidAndName(self.cfg, self.op.instance_uuid,
1683
                                self.op.instance_name)
1684
    instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1685
    assert instance is not None
1686

    
1687
    # It should actually not happen that an instance is running with a disabled
1688
    # disk template, but in case it does, the renaming of file-based instances
1689
    # will fail horribly. Thus, we test it before.
1690
    if (instance.disk_template in constants.DTS_FILEBASED and
1691
        self.op.new_name != instance.name):
1692
      CheckDiskTemplateEnabled(self.cfg.GetClusterInfo(),
1693
                               instance.disk_template)
1694

    
1695
    CheckNodeOnline(self, instance.primary_node)
1696
    CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
1697
                       msg="cannot rename")
1698
    self.instance = instance
1699

    
1700
    new_name = self.op.new_name
1701
    if self.op.name_check:
1702
      hostname = _CheckHostnameSane(self, new_name)
1703
      new_name = self.op.new_name = hostname.name
1704
      if (self.op.ip_check and
1705
          netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
1706
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
1707
                                   (hostname.ip, new_name),
1708
                                   errors.ECODE_NOTUNIQUE)
1709

    
1710
    instance_names = [inst.name for
1711
                      inst in self.cfg.GetAllInstancesInfo().values()]
1712
    if new_name in instance_names and new_name != instance.name:
1713
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
1714
                                 new_name, errors.ECODE_EXISTS)
1715

    
1716
  def Exec(self, feedback_fn):
1717
    """Rename the instance.
1718

1719
    """
1720
    old_name = self.instance.name
1721

    
1722
    rename_file_storage = False
1723
    if (self.instance.disk_template in (constants.DT_FILE,
1724
                                        constants.DT_SHARED_FILE) and
1725
        self.op.new_name != self.instance.name):
1726
      old_file_storage_dir = os.path.dirname(
1727
                               self.instance.disks[0].logical_id[1])
1728
      rename_file_storage = True
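    # Example with a hypothetical path: a file-based disk whose logical_id is
    # ("loop", "/srv/ganeti/file-storage/inst1.example.com/disk0") gives an
    # old_file_storage_dir of "/srv/ganeti/file-storage/inst1.example.com",
    # i.e. the per-instance directory that must be renamed along with the
    # instance itself.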
1729

    
1730
    self.cfg.RenameInstance(self.instance.uuid, self.op.new_name)
1731
    # Change the instance lock. This is definitely safe while we hold the BGL.
1732
    # Otherwise the new lock would have to be added in acquired mode.
1733
    assert self.REQ_BGL
1734
    assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER)
1735
    self.glm.remove(locking.LEVEL_INSTANCE, old_name)
1736
    self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
1737

    
1738
    # re-read the instance from the configuration after rename
1739
    renamed_inst = self.cfg.GetInstanceInfo(self.instance.uuid)
1740

    
1741
    if rename_file_storage:
1742
      new_file_storage_dir = os.path.dirname(
1743
                               renamed_inst.disks[0].logical_id[1])
1744
      result = self.rpc.call_file_storage_dir_rename(renamed_inst.primary_node,
1745
                                                     old_file_storage_dir,
1746
                                                     new_file_storage_dir)
1747
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
1748
                   " (but the instance has been renamed in Ganeti)" %
1749
                   (self.cfg.GetNodeName(renamed_inst.primary_node),
1750
                    old_file_storage_dir, new_file_storage_dir))
1751

    
1752
    StartInstanceDisks(self, renamed_inst, None)
1753
    # update info on disks
1754
    info = GetInstanceInfoText(renamed_inst)
1755
    for (idx, disk) in enumerate(renamed_inst.disks):
1756
      for node_uuid in renamed_inst.all_nodes:
1757
        result = self.rpc.call_blockdev_setinfo(node_uuid,
1758
                                                (disk, renamed_inst), info)
1759
        result.Warn("Error setting info on node %s for disk %s" %
1760
                    (self.cfg.GetNodeName(node_uuid), idx), self.LogWarning)
1761
    try:
1762
      result = self.rpc.call_instance_run_rename(renamed_inst.primary_node,
1763
                                                 renamed_inst, old_name,
1764
                                                 self.op.debug_level)
1765
      result.Warn("Could not run OS rename script for instance %s on node %s"
1766
                  " (but the instance has been renamed in Ganeti)" %
1767
                  (renamed_inst.name,
1768
                   self.cfg.GetNodeName(renamed_inst.primary_node)),
1769
                  self.LogWarning)
1770
    finally:
1771
      ShutdownInstanceDisks(self, renamed_inst)
1772

    
1773
    return renamed_inst.name
1774

    
1775

    
1776
class LUInstanceRemove(LogicalUnit):
1777
  """Remove an instance.
1778

1779
  """
1780
  HPATH = "instance-remove"
1781
  HTYPE = constants.HTYPE_INSTANCE
1782
  REQ_BGL = False
1783

    
1784
  def ExpandNames(self):
1785
    self._ExpandAndLockInstance()
1786
    self.needed_locks[locking.LEVEL_NODE] = []
1787
    self.needed_locks[locking.LEVEL_NODE_RES] = []
1788
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
1789

    
1790
  def DeclareLocks(self, level):
1791
    if level == locking.LEVEL_NODE:
1792
      self._LockInstancesNodes()
1793
    elif level == locking.LEVEL_NODE_RES:
1794
      # Copy node locks
1795
      self.needed_locks[locking.LEVEL_NODE_RES] = \
1796
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1797

    
1798
  def BuildHooksEnv(self):
1799
    """Build hooks env.
1800

1801
    This runs on master, primary and secondary nodes of the instance.
1802

1803
    """
1804
    env = BuildInstanceHookEnvByObject(self, self.instance)
1805
    env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
1806
    return env
1807

    
1808
  def BuildHooksNodes(self):
1809
    """Build hooks nodes.
1810

1811
    """
1812
    nl = [self.cfg.GetMasterNode()]
1813
    nl_post = list(self.instance.all_nodes) + nl
1814
    return (nl, nl_post)
1815

    
1816
  def CheckPrereq(self):
1817
    """Check prerequisites.
1818

1819
    This checks that the instance is in the cluster.
1820

1821
    """
1822
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1823
    assert self.instance is not None, \
1824
      "Cannot retrieve locked instance %s" % self.op.instance_name
1825

    
1826
  def Exec(self, feedback_fn):
1827
    """Remove the instance.
1828

1829
    """
1830
    logging.info("Shutting down instance %s on node %s", self.instance.name,
1831
                 self.cfg.GetNodeName(self.instance.primary_node))
1832

    
1833
    result = self.rpc.call_instance_shutdown(self.instance.primary_node,
1834
                                             self.instance,
1835
                                             self.op.shutdown_timeout,
1836
                                             self.op.reason)
1837
    if self.op.ignore_failures:
1838
      result.Warn("Warning: can't shutdown instance", feedback_fn)
1839
    else:
1840
      result.Raise("Could not shutdown instance %s on node %s" %
1841
                   (self.instance.name,
1842
                    self.cfg.GetNodeName(self.instance.primary_node)))
1843

    
1844
    assert (self.owned_locks(locking.LEVEL_NODE) ==
1845
            self.owned_locks(locking.LEVEL_NODE_RES))
1846
    assert not (set(self.instance.all_nodes) -
1847
                self.owned_locks(locking.LEVEL_NODE)), \
1848
      "Not owning correct locks"
1849

    
1850
    RemoveInstance(self, feedback_fn, self.instance, self.op.ignore_failures)
1851

    
1852

    
1853
class LUInstanceMove(LogicalUnit):
1854
  """Move an instance by data-copying.
1855

1856
  """
1857
  HPATH = "instance-move"
1858
  HTYPE = constants.HTYPE_INSTANCE
1859
  REQ_BGL = False
1860

    
1861
  def ExpandNames(self):
1862
    self._ExpandAndLockInstance()
1863
    (self.op.target_node_uuid, self.op.target_node) = \
1864
      ExpandNodeUuidAndName(self.cfg, self.op.target_node_uuid,
1865
                            self.op.target_node)
1866
    self.needed_locks[locking.LEVEL_NODE] = [self.op.target_node_uuid]
1867
    self.needed_locks[locking.LEVEL_NODE_RES] = []
1868
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
1869

    
1870
  def DeclareLocks(self, level):
1871
    if level == locking.LEVEL_NODE:
1872
      self._LockInstancesNodes(primary_only=True)
1873
    elif level == locking.LEVEL_NODE_RES:
1874
      # Copy node locks
1875
      self.needed_locks[locking.LEVEL_NODE_RES] = \
1876
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1877

    
1878
  def BuildHooksEnv(self):
1879
    """Build hooks env.
1880

1881
    This runs on master, primary and target nodes of the instance.
1882

1883
    """
1884
    env = {
1885
      "TARGET_NODE": self.op.target_node,
1886
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
1887
      }
1888
    env.update(BuildInstanceHookEnvByObject(self, self.instance))
1889
    return env
1890

    
1891
  def BuildHooksNodes(self):
1892
    """Build hooks nodes.
1893

1894
    """
1895
    nl = [
1896
      self.cfg.GetMasterNode(),
1897
      self.instance.primary_node,
1898
      self.op.target_node_uuid,
1899
      ]
1900
    return (nl, nl)
1901

    
1902
  def CheckPrereq(self):
1903
    """Check prerequisites.
1904

1905
    This checks that the instance is in the cluster.
1906

1907
    """
1908
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1909
    assert self.instance is not None, \
1910
      "Cannot retrieve locked instance %s" % self.op.instance_name
1911

    
1912
    if self.instance.disk_template not in constants.DTS_COPYABLE:
1913
      raise errors.OpPrereqError("Disk template %s not suitable for copying" %
1914
                                 self.instance.disk_template,
1915
                                 errors.ECODE_STATE)
1916

    
1917
    target_node = self.cfg.GetNodeInfo(self.op.target_node_uuid)
1918
    assert target_node is not None, \
1919
      "Cannot retrieve locked node %s" % self.op.target_node
1920

    
1921
    self.target_node_uuid = target_node.uuid
1922
    if target_node.uuid == self.instance.primary_node:
1923
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
1924
                                 (self.instance.name, target_node.name),
1925
                                 errors.ECODE_STATE)
1926

    
1927
    cluster = self.cfg.GetClusterInfo()
1928
    bep = cluster.FillBE(self.instance)
1929

    
1930
    for idx, dsk in enumerate(self.instance.disks):
1931
      if dsk.dev_type not in (constants.DT_PLAIN, constants.DT_FILE,
1932
                              constants.DT_SHARED_FILE, constants.DT_GLUSTER):
1933
        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
1934
                                   " cannot copy" % idx, errors.ECODE_STATE)
1935

    
1936
    CheckNodeOnline(self, target_node.uuid)
1937
    CheckNodeNotDrained(self, target_node.uuid)
1938
    CheckNodeVmCapable(self, target_node.uuid)
1939
    group_info = self.cfg.GetNodeGroup(target_node.group)
1940
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
1941
    CheckTargetNodeIPolicy(self, ipolicy, self.instance, target_node, self.cfg,
1942
                           ignore=self.op.ignore_ipolicy)
1943

    
1944
    if self.instance.admin_state == constants.ADMINST_UP:
1945
      # check memory requirements on the target node
1946
      CheckNodeFreeMemory(
1947
          self, target_node.uuid, "failing over instance %s" %
1948
          self.instance.name, bep[constants.BE_MAXMEM],
1949
          self.instance.hypervisor,
1950
          cluster.hvparams[self.instance.hypervisor])
1951
    else:
1952
      self.LogInfo("Not checking memory on the secondary node as"
1953
                   " instance will not be started")
1954

    
1955
    # check bridge existence
1956
    CheckInstanceBridgesExist(self, self.instance, node_uuid=target_node.uuid)
1957

    
1958
  def Exec(self, feedback_fn):
1959
    """Move an instance.
1960

1961
    The move is done by shutting it down on its present node, copying
1962
    the data over (slow) and starting it on the new node.
1963

1964
    """
1965
    source_node = self.cfg.GetNodeInfo(self.instance.primary_node)
1966
    target_node = self.cfg.GetNodeInfo(self.target_node_uuid)
1967

    
1968
    self.LogInfo("Shutting down instance %s on source node %s",
1969
                 self.instance.name, source_node.name)
1970

    
1971
    assert (self.owned_locks(locking.LEVEL_NODE) ==
1972
            self.owned_locks(locking.LEVEL_NODE_RES))
1973

    
1974
    result = self.rpc.call_instance_shutdown(source_node.uuid, self.instance,
1975
                                             self.op.shutdown_timeout,
1976
                                             self.op.reason)
1977
    if self.op.ignore_consistency:
1978
      result.Warn("Could not shutdown instance %s on node %s. Proceeding"
1979
                  " anyway. Please make sure node %s is down. Error details" %
1980
                  (self.instance.name, source_node.name, source_node.name),
1981
                  self.LogWarning)
1982
    else:
1983
      result.Raise("Could not shutdown instance %s on node %s" %
1984
                   (self.instance.name, source_node.name))
1985

    
1986
    # create the target disks
1987
    try:
1988
      CreateDisks(self, self.instance, target_node_uuid=target_node.uuid)
1989
    except errors.OpExecError:
1990
      self.LogWarning("Device creation failed")
1991
      self.cfg.ReleaseDRBDMinors(self.instance.uuid)
1992
      raise
1993

    
1994
    errs = []
1995
    transfers = []
1996
    # activate, get path, create transfer jobs
1997
    for idx, disk in enumerate(self.instance.disks):
1998
      # FIXME: pass debug option from opcode to backend
1999
      dt = masterd.instance.DiskTransfer("disk/%s" % idx,
2000
                                         constants.IEIO_RAW_DISK,
2001
                                         (disk, self.instance),
2002
                                         constants.IEIO_RAW_DISK,
2003
                                         (disk, self.instance),
2004
                                         None)
2005
      transfers.append(dt)
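    # Sketch of what the loop above produces (disk objects elided): one
    # raw-to-raw transfer per disk, e.g.
    #   DiskTransfer("disk/0", IEIO_RAW_DISK, (disk0, inst),
    #                IEIO_RAW_DISK, (disk0, inst), None)
    # TransferInstanceData below then streams each of them from the source
    # node to the target node's secondary IP.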
2006

    
2007
    import_result = \
2008
      masterd.instance.TransferInstanceData(self, feedback_fn,
2009
                                            source_node.uuid,
2010
                                            target_node.uuid,
2011
                                            target_node.secondary_ip,
2012
                                            self.op.compress,
2013
                                            self.instance, transfers)
2014
    if not compat.all(import_result):
2015
      errs.append("Failed to transfer instance data")
2016

    
2017
    if errs:
2018
      self.LogWarning("Some disks failed to copy, aborting")
2019
      try:
2020
        RemoveDisks(self, self.instance, target_node_uuid=target_node.uuid)
2021
      finally:
2022
        self.cfg.ReleaseDRBDMinors(self.instance.uuid)
2023
        raise errors.OpExecError("Errors during disk copy: %s" %
2024
                                 (",".join(errs),))
2025

    
2026
    self.instance.primary_node = target_node.uuid
2027
    self.cfg.Update(self.instance, feedback_fn)
2028

    
2029
    self.LogInfo("Removing the disks on the original node")
2030
    RemoveDisks(self, self.instance, target_node_uuid=source_node.uuid)
2031

    
2032
    # Only start the instance if it's marked as up
2033
    if self.instance.admin_state == constants.ADMINST_UP:
2034
      self.LogInfo("Starting instance %s on node %s",
2035
                   self.instance.name, target_node.name)
2036

    
2037
      disks_ok, _ = AssembleInstanceDisks(self, self.instance,
2038
                                          ignore_secondaries=True)
2039
      if not disks_ok:
2040
        ShutdownInstanceDisks(self, self.instance)
2041
        raise errors.OpExecError("Can't activate the instance's disks")
2042

    
2043
      result = self.rpc.call_instance_start(target_node.uuid,
2044
                                            (self.instance, None, None), False,
2045
                                            self.op.reason)
2046
      msg = result.fail_msg
2047
      if msg:
2048
        ShutdownInstanceDisks(self, self.instance)
2049
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
2050
                                 (self.instance.name, target_node.name, msg))
2051

    
2052

    
2053
class LUInstanceMultiAlloc(NoHooksLU):
2054
  """Allocates multiple instances at the same time.
2055

2056
  """
2057
  REQ_BGL = False
2058

    
2059
  def CheckArguments(self):
2060
    """Check arguments.
2061

2062
    """
2063
    nodes = []
2064
    for inst in self.op.instances:
2065
      if inst.iallocator is not None:
2066
        raise errors.OpPrereqError("iallocator are not allowed to be set on"
2067
                                   " instance objects", errors.ECODE_INVAL)
2068
      nodes.append(bool(inst.pnode))
2069
      if inst.disk_template in constants.DTS_INT_MIRROR:
2070
        nodes.append(bool(inst.snode))
2071

    
2072
    has_nodes = compat.any(nodes)
2073
    if compat.all(nodes) ^ has_nodes:
2074
      raise errors.OpPrereqError("There are instance objects providing"
2075
                                 " pnode/snode while others do not",
2076
                                 errors.ECODE_INVAL)
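    # The XOR above catches partially specified placements; for a non-empty
    # C{nodes} list:
    #   every entry True  -> all=True,  any=True  -> no error
    #   every entry False -> all=False, any=False -> no error
    #   mixed             -> all=False, any=True  -> OpPrereqError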
2077

    
2078
    if not has_nodes and self.op.iallocator is None:
2079
      default_iallocator = self.cfg.GetDefaultIAllocator()
2080
      if default_iallocator:
2081
        self.op.iallocator = default_iallocator
2082
      else:
2083
        raise errors.OpPrereqError("No iallocator or nodes on the instances"
2084
                                   " given and no cluster-wide default"
2085
                                   " iallocator found; please specify either"
2086
                                   " an iallocator or nodes on the instances"
2087
                                   " or set a cluster-wide default iallocator",
2088
                                   errors.ECODE_INVAL)
2089

    
2090
    _CheckOpportunisticLocking(self.op)
2091

    
2092
    dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
2093
    if dups:
2094
      raise errors.OpPrereqError("There are duplicate instance names: %s" %
2095
                                 utils.CommaJoin(dups), errors.ECODE_INVAL)
2096

    
2097
  def ExpandNames(self):
2098
    """Calculate the locks.
2099

2100
    """
2101
    self.share_locks = ShareAll()
2102
    self.needed_locks = {
2103
      # iallocator will select nodes and even if no iallocator is used,
2104
      # collisions with LUInstanceCreate should be avoided
2105
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
2106
      }
2107

    
2108
    if self.op.iallocator:
2109
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
2110
      self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET
2111

    
2112
      if self.op.opportunistic_locking:
2113
        self.opportunistic_locks[locking.LEVEL_NODE] = True
2114
    else:
2115
      nodeslist = []
2116
      for inst in self.op.instances:
2117
        (inst.pnode_uuid, inst.pnode) = \
2118
          ExpandNodeUuidAndName(self.cfg, inst.pnode_uuid, inst.pnode)
2119
        nodeslist.append(inst.pnode_uuid)
2120
        if inst.snode is not None:
2121
          (inst.snode_uuid, inst.snode) = \
2122
            ExpandNodeUuidAndName(self.cfg, inst.snode_uuid, inst.snode)
2123
          nodeslist.append(inst.snode_uuid)
2124

    
2125
      self.needed_locks[locking.LEVEL_NODE] = nodeslist
2126
      # Lock resources of instance's primary and secondary nodes (copy to
2127
      # prevent accidental modification)
2128
      self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)
2129

    
2130
  def DeclareLocks(self, level):
2131
    if level == locking.LEVEL_NODE_RES and \
2132
      self.opportunistic_locks[locking.LEVEL_NODE]:
2133
      # Even when using opportunistic locking, we require the same set of
2134
      # NODE_RES locks as we got NODE locks
2135
      self.needed_locks[locking.LEVEL_NODE_RES] = \
2136
        self.owned_locks(locking.LEVEL_NODE)
2137

    
2138
  def CheckPrereq(self):
2139
    """Check prerequisite.
2140

2141
    """
2142
    if self.op.iallocator:
2143
      cluster = self.cfg.GetClusterInfo()
2144
      default_vg = self.cfg.GetVGName()
2145
      ec_id = self.proc.GetECId()
2146

    
2147
      if self.op.opportunistic_locking:
2148
        # Only consider nodes for which a lock is held
2149
        node_whitelist = self.cfg.GetNodeNames(
2150
                           list(self.owned_locks(locking.LEVEL_NODE)))
2151
      else:
2152
        node_whitelist = None
2153

    
2154
      insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
2155
                                           _ComputeNics(op, cluster, None,
2156
                                                        self.cfg, ec_id),
2157
                                           _ComputeFullBeParams(op, cluster),
2158
                                           node_whitelist)
2159
               for op in self.op.instances]
2160

    
2161
      req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
2162
      ial = iallocator.IAllocator(self.cfg, self.rpc, req)
2163

    
2164
      ial.Run(self.op.iallocator)
2165

    
2166
      if not ial.success:
2167
        raise errors.OpPrereqError("Can't compute nodes using"
2168
                                   " iallocator '%s': %s" %
2169
                                   (self.op.iallocator, ial.info),
2170
                                   errors.ECODE_NORES)
2171

    
2172
      self.ia_result = ial.result
2173

    
2174
    if self.op.dry_run:
2175
      self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
2176
        constants.JOB_IDS_KEY: [],
2177
        })
2178

    
2179
  def _ConstructPartialResult(self):
2180
    """Contructs the partial result.
2181

2182
    """
2183
    if self.op.iallocator:
2184
      (allocatable, failed_insts) = self.ia_result
2185
      allocatable_insts = map(compat.fst, allocatable)
2186
    else:
2187
      allocatable_insts = [op.instance_name for op in self.op.instances]
2188
      failed_insts = []
2189

    
2190
    return {
2191
      constants.ALLOCATABLE_KEY: allocatable_insts,
2192
      constants.FAILED_KEY: failed_insts,
2193
      }
2194

    
2195
  def Exec(self, feedback_fn):
2196
    """Executes the opcode.
2197

2198
    """
2199
    jobs = []
2200
    if self.op.iallocator:
2201
      op2inst = dict((op.instance_name, op) for op in self.op.instances)
2202
      (allocatable, failed) = self.ia_result
2203

    
2204
      for (name, node_names) in allocatable:
2205
        op = op2inst.pop(name)
2206

    
2207
        (op.pnode_uuid, op.pnode) = \
2208
          ExpandNodeUuidAndName(self.cfg, None, node_names[0])
2209
        if len(node_names) > 1:
2210
          (op.snode_uuid, op.snode) = \
2211
            ExpandNodeUuidAndName(self.cfg, None, node_names[1])
2212

    
2213
        jobs.append([op])
2214

    
2215
      missing = set(op2inst.keys()) - set(failed)
2216
      assert not missing, \
2217
          "Iallocator did return incomplete result: %s" % \
2218
        utils.CommaJoin(missing)
2219
    else:
2220
      jobs.extend([op] for op in self.op.instances)
2221

    
2222
    return ResultWithJobs(jobs, **self._ConstructPartialResult())
2223

    
2224

    
2225
class _InstNicModPrivate:
2226
  """Data structure for network interface modifications.
2227

2228
  Used by L{LUInstanceSetParams}.
2229

2230
  """
2231
  def __init__(self):
2232
    self.params = None
2233
    self.filled = None
2234

    
2235

    
2236
def _PrepareContainerMods(mods, private_fn):
2237
  """Prepares a list of container modifications by adding a private data field.
2238

2239
  @type mods: list of tuples; (operation, index, parameters)
2240
  @param mods: List of modifications
2241
  @type private_fn: callable or None
2242
  @param private_fn: Callable for constructing a private data field for a
2243
    modification
2244
  @rtype: list
2245

2246
  """
2247
  if private_fn is None:
2248
    fn = lambda: None
2249
  else:
2250
    fn = private_fn
2251

    
2252
  return [(op, idx, params, fn()) for (op, idx, params) in mods]
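# A minimal, hedged sketch (not called by any LU): shows how
# _PrepareContainerMods pairs every modification with its own private data
# object, here L{_InstNicModPrivate}.  The modification list is invented for
# the example.
def _ExamplePrepareContainerMods():
  """Example only: show the private-data slot added to each modification.

  """
  mods = [
    (constants.DDM_ADD, -1, {constants.INIC_MAC: constants.VALUE_AUTO}),
    (constants.DDM_REMOVE, 0, {}),
    ]
  prepared = _PrepareContainerMods(mods, _InstNicModPrivate)
  # Every entry now carries a fresh _InstNicModPrivate instance
  return [isinstance(private, _InstNicModPrivate)
          for (_, _, _, private) in prepared]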
2253

    
2254

    
2255
def _CheckNodesPhysicalCPUs(lu, node_uuids, requested, hypervisor_specs):
2256
  """Checks if nodes have enough physical CPUs
2257

2258
  This function checks if all given nodes have the needed number of
2259
  physical CPUs. In case any node has less CPUs or we cannot get the
2260
  information from the node, this function raises an OpPrereqError
2261
  exception.
2262

2263
  @type lu: C{LogicalUnit}
2264
  @param lu: a logical unit from which we get configuration data
2265
  @type node_uuids: C{list}
2266
  @param node_uuids: the list of node UUIDs to check
2267
  @type requested: C{int}
2268
  @param requested: the minimum acceptable number of physical CPUs
2269
  @type hypervisor_specs: list of pairs (string, dict of strings)
2270
  @param hypervisor_specs: list of hypervisor specifications in
2271
      pairs (hypervisor_name, hvparams)
2272
  @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
2273
      or we cannot check the node
2274

2275
  """
2276
  nodeinfo = lu.rpc.call_node_info(node_uuids, None, hypervisor_specs)
2277
  for node_uuid in node_uuids:
2278
    info = nodeinfo[node_uuid]
2279
    node_name = lu.cfg.GetNodeName(node_uuid)
2280
    info.Raise("Cannot get current information from node %s" % node_name,
2281
               prereq=True, ecode=errors.ECODE_ENVIRON)
2282
    (_, _, (hv_info, )) = info.payload
2283
    num_cpus = hv_info.get("cpu_total", None)
2284
    if not isinstance(num_cpus, int):
2285
      raise errors.OpPrereqError("Can't compute the number of physical CPUs"
2286
                                 " on node %s, result was '%s'" %
2287
                                 (node_name, num_cpus), errors.ECODE_ENVIRON)
2288
    if requested > num_cpus:
2289
      raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
2290
                                 "required" % (node_name, num_cpus, requested),
2291
                                 errors.ECODE_NORES)
2292

    
2293

    
2294
def GetItemFromContainer(identifier, kind, container):
2295
  """Return the item refered by the identifier.
2296

2297
  @type identifier: string
2298
  @param identifier: Item index or name or UUID
2299
  @type kind: string
2300
  @param kind: One-word item description
2301
  @type container: list
2302
  @param container: Container to get the item from
2303

2304
  """
2305
  # Index
2306
  try:
2307
    idx = int(identifier)
2308
    if idx == -1:
2309
      # Append
2310
      absidx = len(container) - 1
2311
    elif idx < 0:
2312
      raise IndexError("Not accepting negative indices other than -1")
2313
    elif idx > len(container):
2314
      raise IndexError("Got %s index %s, but there are only %s" %
2315
                       (kind, idx, len(container)))
2316
    else:
2317
      absidx = idx
2318
    return (absidx, container[idx])
2319
  except ValueError:
2320
    pass
2321

    
2322
  for idx, item in enumerate(container):
2323
    if item.uuid == identifier or item.name == identifier:
2324
      return (idx, item)
2325

    
2326
  raise errors.OpPrereqError("Cannot find %s with identifier %s" %
2327
                             (kind, identifier), errors.ECODE_NOENT)
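# Assumed resolution examples for GetItemFromContainer (the container holds
# hypothetical objects exposing C{name} and C{uuid}):
#   "1"            -> (1, container[1])                 # plain index
#   "-1"           -> (len(container) - 1, last item)   # -1 means "last"
#   "eth0" / UUID  -> first entry whose name or uuid matches
# Any other negative index raises IndexError; an unknown identifier raises
# OpPrereqError with ECODE_NOENT.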
2328

    
2329

    
2330
def _ApplyContainerMods(kind, container, chgdesc, mods,
2331
                        create_fn, modify_fn, remove_fn,
2332
                        post_add_fn=None):
2333
  """Applies descriptions in C{mods} to C{container}.
2334

2335
  @type kind: string
2336
  @param kind: One-word item description
2337
  @type container: list
2338
  @param container: Container to modify
2339
  @type chgdesc: None or list
2340
  @param chgdesc: List of applied changes
2341
  @type mods: list
2342
  @param mods: Modifications as returned by L{_PrepareContainerMods}
2343
  @type create_fn: callable
2344
  @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
2345
    receives absolute item index, parameters and private data object as added
2346
    by L{_PrepareContainerMods}, returns tuple containing new item and changes
2347
    as list
2348
  @type modify_fn: callable
2349
  @param modify_fn: Callback for modifying an existing item
2350
    (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
2351
    and private data object as added by L{_PrepareContainerMods}, returns
2352
    changes as list
2353
  @type remove_fn: callable
2354
  @param remove_fn: Callback on removing item; receives absolute item index,
2355
    item and private data object as added by L{_PrepareContainerMods}
2356
  @type post_add_fn: callable
2357
  @param post_add_fn: Callable for post-processing a newly created item after
2358
    it has been put into the container. It receives the index of the new item
2359
    and the new item as parameters.
2360

2361
  """
2362
  for (op, identifier, params, private) in mods:
2363
    changes = None
2364

    
2365
    if op == constants.DDM_ADD:
2366
      # Calculate where item will be added
2367
      # When adding an item, identifier can only be an index
2368
      try:
2369
        idx = int(identifier)
2370
      except ValueError:
2371
        raise errors.OpPrereqError("Only possitive integer or -1 is accepted as"
2372
                                   " identifier for %s" % constants.DDM_ADD,
2373
                                   errors.ECODE_INVAL)
2374
      if idx == -1:
2375
        addidx = len(container)
2376
      else:
2377
        if idx < 0:
2378
          raise IndexError("Not accepting negative indices other than -1")
2379
        elif idx > len(container):
2380
          raise IndexError("Got %s index %s, but there are only %s" %
2381
                           (kind, idx, len(container)))
2382
        addidx = idx
2383

    
2384
      if create_fn is None:
2385
        item = params
2386
      else:
2387
        (item, changes) = create_fn(addidx, params, private)
2388

    
2389
      if idx == -1:
2390
        container.append(item)
2391
      else:
2392
        assert idx >= 0
2393
        assert idx <= len(container)
2394
        # list.insert does so before the specified index
2395
        container.insert(idx, item)
2396

    
2397
      if post_add_fn is not None:
2398
        post_add_fn(addidx, item)
2399

    
2400
    else:
2401
      # Retrieve existing item
2402
      (absidx, item) = GetItemFromContainer(identifier, kind, container)
2403

    
2404
      if op == constants.DDM_REMOVE:
2405
        assert not params
2406

    
2407
        changes = [("%s/%s" % (kind, absidx), "remove")]
2408

    
2409
        if remove_fn is not None:
2410
          msg = remove_fn(absidx, item, private)
2411
          if msg:
2412
            changes.append(("%s/%s" % (kind, absidx), msg))
2413

    
2414
        assert container[absidx] == item
2415
        del container[absidx]
2416
      elif op == constants.DDM_MODIFY:
2417
        if modify_fn is not None:
2418
          changes = modify_fn(absidx, item, params, private)
2419
      else:
2420
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2421

    
2422
    assert _TApplyContModsCbChanges(changes)
2423

    
2424
    if not (chgdesc is None or changes is None):
2425
      chgdesc.extend(changes)
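# Assumed walk-through of _ApplyContainerMods on a NIC container with two
# entries, using mods prepared by L{_PrepareContainerMods} (names invented):
#   (DDM_ADD, -1, params)     -> create_fn(2, params, private); append result
#   (DDM_MODIFY, "0", params) -> modify_fn(0, nics[0], params, private)
#   (DDM_REMOVE, "1", {})     -> remove_fn(1, nics[1], private); delete entry
# Each step's change descriptions are collected in C{chgdesc} when a list is
# passed in.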
2426

    
2427

    
2428
def _UpdateIvNames(base_index, disks):
2429
  """Updates the C{iv_name} attribute of disks.
2430

2431
  @type disks: list of L{objects.Disk}
2432

2433
  """
2434
  for (idx, disk) in enumerate(disks):
2435
    disk.iv_name = "disk/%s" % (base_index + idx, )
2436

    
2437

    
2438
class LUInstanceSetParams(LogicalUnit):
2439
  """Modifies an instances's parameters.
2440

2441
  """
2442
  HPATH = "instance-modify"
2443
  HTYPE = constants.HTYPE_INSTANCE
2444
  REQ_BGL = False
2445

    
2446
  @staticmethod
2447
  def _UpgradeDiskNicMods(kind, mods, verify_fn):
2448
    assert ht.TList(mods)
2449
    assert not mods or len(mods[0]) in (2, 3)
2450

    
2451
    if mods and len(mods[0]) == 2:
2452
      result = []
2453

    
2454
      addremove = 0
2455
      for op, params in mods:
2456
        if op in (constants.DDM_ADD, constants.DDM_REMOVE):
2457
          result.append((op, -1, params))
2458
          addremove += 1
2459

    
2460
          if addremove > 1:
2461
            raise errors.OpPrereqError("Only one %s add or remove operation is"
2462
                                       " supported at a time" % kind,
2463
                                       errors.ECODE_INVAL)
2464
        else:
2465
          result.append((constants.DDM_MODIFY, op, params))
2466

    
2467
      assert verify_fn(result)
2468
    else:
2469
      result = mods
2470

    
2471
    return result
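    # Hedged example of the upgrade above for old-style 2-tuple mods
    # (parameter dicts invented):
    #   [("add", {"size": 1024}), ("2", {"mode": "ro"})]
    # becomes
    #   [(constants.DDM_ADD, -1, {"size": 1024}),
    #    (constants.DDM_MODIFY, "2", {"mode": "ro"})]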
2472

    
2473
  @staticmethod
2474
  def _CheckMods(kind, mods, key_types, item_fn):
2475
    """Ensures requested disk/NIC modifications are valid.
2476

2477
    """
2478
    for (op, _, params) in mods:
2479
      assert ht.TDict(params)
2480

    
2481
      # If 'key_types' is an empty dict, we assume we have an
2482
      # 'ext' template and thus do not ForceDictType
2483
      if key_types:
2484
        utils.ForceDictType(params, key_types)
2485

    
2486
      if op == constants.DDM_REMOVE:
2487
        if params:
2488
          raise errors.OpPrereqError("No settings should be passed when"
2489
                                     " removing a %s" % kind,
2490
                                     errors.ECODE_INVAL)
2491
      elif op in (constants.DDM_ADD, constants.DDM_MODIFY):
2492
        item_fn(op, params)
2493
      else:
2494
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2495

    
2496
  def _VerifyDiskModification(self, op, params, excl_stor):
2497
    """Verifies a disk modification.
2498

2499
    """
2500
    if op == constants.DDM_ADD:
2501
      mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
2502
      if mode not in constants.DISK_ACCESS_SET:
2503
        raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
2504
                                   errors.ECODE_INVAL)
2505

    
2506
      size = params.get(constants.IDISK_SIZE, None)
2507
      if size is None:
2508
        raise errors.OpPrereqError("Required disk parameter '%s' missing" %
2509
                                   constants.IDISK_SIZE, errors.ECODE_INVAL)
2510
      size = int(size)
2511

    
2512
      params[constants.IDISK_SIZE] = size
2513
      name = params.get(constants.IDISK_NAME, None)
2514
      if name is not None and name.lower() == constants.VALUE_NONE:
2515
        params[constants.IDISK_NAME] = None
2516

    
2517
      CheckSpindlesExclusiveStorage(params, excl_stor, True)
2518

    
2519
    elif op == constants.DDM_MODIFY:
2520
      if constants.IDISK_SIZE in params:
2521
        raise errors.OpPrereqError("Disk size change not possible, use"
2522
                                   " grow-disk", errors.ECODE_INVAL)
2523

    
2524
      # Disk modification supports changing only the disk name and mode.
2525
      # Changing arbitrary parameters is allowed only for the ext disk template
2526
      if self.instance.disk_template != constants.DT_EXT:
2527
        utils.ForceDictType(params, constants.MODIFIABLE_IDISK_PARAMS_TYPES)
2528

    
2529
      name = params.get(constants.IDISK_NAME, None)
2530
      if name is not None and name.lower() == constants.VALUE_NONE:
2531
        params[constants.IDISK_NAME] = None
2532

    
2533
  @staticmethod
  def _VerifyNicModification(op, params):
    """Verifies a network interface modification.

    """
    if op in (constants.DDM_ADD, constants.DDM_MODIFY):
      ip = params.get(constants.INIC_IP, None)
      name = params.get(constants.INIC_NAME, None)
      req_net = params.get(constants.INIC_NETWORK, None)
      link = params.get(constants.NIC_LINK, None)
      mode = params.get(constants.NIC_MODE, None)
      if name is not None and name.lower() == constants.VALUE_NONE:
        params[constants.INIC_NAME] = None
      if req_net is not None:
        if req_net.lower() == constants.VALUE_NONE:
          params[constants.INIC_NETWORK] = None
          req_net = None
        elif link is not None or mode is not None:
          raise errors.OpPrereqError("If network is given, mode or link"
                                     " should not be given",
                                     errors.ECODE_INVAL)

      if op == constants.DDM_ADD:
        macaddr = params.get(constants.INIC_MAC, None)
        if macaddr is None:
          params[constants.INIC_MAC] = constants.VALUE_AUTO

      if ip is not None:
        if ip.lower() == constants.VALUE_NONE:
          params[constants.INIC_IP] = None
        else:
          if ip.lower() == constants.NIC_IP_POOL:
            if op == constants.DDM_ADD and req_net is None:
              raise errors.OpPrereqError("If ip=pool, parameter network"
                                         " cannot be none",
                                         errors.ECODE_INVAL)
          else:
            if not netutils.IPAddress.IsValid(ip):
              raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                         errors.ECODE_INVAL)

      if constants.INIC_MAC in params:
        macaddr = params[constants.INIC_MAC]
        if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
          macaddr = utils.NormalizeAndValidateMac(macaddr)

        if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
                                     " modifying an existing NIC",
                                     errors.ECODE_INVAL)

  def CheckArguments(self):
    if not (self.op.nics or self.op.disks or self.op.disk_template or
            self.op.hvparams or self.op.beparams or self.op.os_name or
            self.op.osparams or self.op.offline is not None or
            self.op.runtime_mem or self.op.pnode or self.op.osparams_private or
            self.op.instance_communication is not None):
      raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)

    if self.op.hvparams:
      CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
                           "hypervisor", "instance", "cluster")

    self.op.disks = self._UpgradeDiskNicMods(
      "disk", self.op.disks, ht.TSetParamsMods(ht.TIDiskParams))
    self.op.nics = self._UpgradeDiskNicMods(
      "NIC", self.op.nics, ht.TSetParamsMods(ht.TINicParams))

    if self.op.disks and self.op.disk_template is not None:
      raise errors.OpPrereqError("Disk template conversion and other disk"
                                 " changes not supported at the same time",
                                 errors.ECODE_INVAL)

    if (self.op.disk_template and
        self.op.disk_template in constants.DTS_INT_MIRROR and
        self.op.remote_node is None):
      raise errors.OpPrereqError("Changing the disk template to a mirrored"
                                 " one requires specifying a secondary node",
                                 errors.ECODE_INVAL)

    # Check NIC modifications
    self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
                    self._VerifyNicModification)

    if self.op.pnode:
      (self.op.pnode_uuid, self.op.pnode) = \
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODEGROUP] = []
    # Can't even acquire node locks in shared mode as upcoming changes in
    # Ganeti 2.6 will start to modify the node object on disk conversion
    self.needed_locks[locking.LEVEL_NODE] = []
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
    # Lock the node group to look up the ipolicy
    self.share_locks[locking.LEVEL_NODEGROUP] = 1

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
      # Acquire locks for the instance's nodegroups optimistically. Needs
      # to be verified in CheckPrereq
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
        self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
    elif level == locking.LEVEL_NODE:
      self._LockInstancesNodes()
      if self.op.disk_template and self.op.remote_node:
        (self.op.remote_node_uuid, self.op.remote_node) = \
          ExpandNodeUuidAndName(self.cfg, self.op.remote_node_uuid,
                                self.op.remote_node)
        self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node_uuid)
    elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, primary and secondaries.

    """
    args = {}
    if constants.BE_MINMEM in self.be_new:
      args["minmem"] = self.be_new[constants.BE_MINMEM]
    if constants.BE_MAXMEM in self.be_new:
      args["maxmem"] = self.be_new[constants.BE_MAXMEM]
    if constants.BE_VCPUS in self.be_new:
      args["vcpus"] = self.be_new[constants.BE_VCPUS]
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
    # information at all.

    if self._new_nics is not None:
      nics = []

      for nic in self._new_nics:
        n = copy.deepcopy(nic)
        nicparams = self.cluster.SimpleFillNIC(n.nicparams)
        n.nicparams = nicparams
        nics.append(NICToTuple(self, n))

      args["nics"] = nics

    env = BuildInstanceHookEnvByObject(self, self.instance, override=args)
    if self.op.disk_template:
      env["NEW_DISK_TEMPLATE"] = self.op.disk_template
    if self.op.runtime_mem:
      env["RUNTIME_MEMORY"] = self.op.runtime_mem

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)

  def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
                              old_params, cluster, pnode_uuid):
    """Prepares and verifies the parameters of a single NIC modification.

    """
    update_params_dict = dict([(key, params[key])
                               for key in constants.NICS_PARAMETERS
                               if key in params])

    req_link = update_params_dict.get(constants.NIC_LINK, None)
    req_mode = update_params_dict.get(constants.NIC_MODE, None)

    new_net_uuid = None
    new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid)
    if new_net_uuid_or_name:
      new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name)
      new_net_obj = self.cfg.GetNetwork(new_net_uuid)

    if old_net_uuid:
      old_net_obj = self.cfg.GetNetwork(old_net_uuid)

    if new_net_uuid:
      netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode_uuid)
      if not netparams:
        raise errors.OpPrereqError("No netparams found for the network"
                                   " %s, probably not connected" %
                                   new_net_obj.name, errors.ECODE_INVAL)
      new_params = dict(netparams)
    else:
      new_params = GetUpdatedParams(old_params, update_params_dict)

    utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)

    new_filled_params = cluster.SimpleFillNIC(new_params)
    objects.NIC.CheckParameterSyntax(new_filled_params)

    new_mode = new_filled_params[constants.NIC_MODE]
    if new_mode == constants.NIC_MODE_BRIDGED:
      bridge = new_filled_params[constants.NIC_LINK]
      msg = self.rpc.call_bridges_exist(pnode_uuid, [bridge]).fail_msg
      if msg:
        msg = "Error checking bridges on node '%s': %s" % \
                (self.cfg.GetNodeName(pnode_uuid), msg)
        if self.op.force:
          self.warn.append(msg)
        else:
          raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)

    elif new_mode == constants.NIC_MODE_ROUTED:
      ip = params.get(constants.INIC_IP, old_ip)
      if ip is None:
        raise errors.OpPrereqError("Cannot set the NIC IP address to None"
                                   " on a routed NIC", errors.ECODE_INVAL)

    elif new_mode == constants.NIC_MODE_OVS:
      # TODO: check OVS link
      self.LogInfo("OVS links are currently not checked for correctness")

    if constants.INIC_MAC in params:
      mac = params[constants.INIC_MAC]
      if mac is None:
        raise errors.OpPrereqError("Cannot unset the NIC MAC address",
                                   errors.ECODE_INVAL)
      elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        # otherwise generate the MAC address
        params[constants.INIC_MAC] = \
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
      else:
        # or validate/reserve the current one
        try:
          self.cfg.ReserveMAC(mac, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("MAC address '%s' already in use"
                                     " in cluster" % mac,
                                     errors.ECODE_NOTUNIQUE)
    elif new_net_uuid != old_net_uuid:

      def get_net_prefix(net_uuid):
        mac_prefix = None
        if net_uuid:
          nobj = self.cfg.GetNetwork(net_uuid)
          mac_prefix = nobj.mac_prefix

        return mac_prefix

      new_prefix = get_net_prefix(new_net_uuid)
      old_prefix = get_net_prefix(old_net_uuid)
      if old_prefix != new_prefix:
        params[constants.INIC_MAC] = \
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())

    # if there is a change in (ip, network) tuple
    new_ip = params.get(constants.INIC_IP, old_ip)
    if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
      if new_ip:
        # if IP is pool then require a network and generate one IP
        if new_ip.lower() == constants.NIC_IP_POOL:
          if new_net_uuid:
            try:
              new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("Unable to get a free IP"
                                         " from the address pool",
                                         errors.ECODE_STATE)
            self.LogInfo("Chose IP %s from network %s",
                         new_ip,
                         new_net_obj.name)
            params[constants.INIC_IP] = new_ip
          else:
            raise errors.OpPrereqError("ip=pool, but no network found",
                                       errors.ECODE_INVAL)
        # Reserve the new IP in the new network, if any
        elif new_net_uuid:
          try:
            self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId(),
                               check=self.op.conflicts_check)
            self.LogInfo("Reserving IP %s in network %s",
                         new_ip, new_net_obj.name)
          except errors.ReservationError:
            raise errors.OpPrereqError("IP %s not available in network %s" %
                                       (new_ip, new_net_obj.name),
                                       errors.ECODE_NOTUNIQUE)
        # new network is None so check if new IP is a conflicting IP
        elif self.op.conflicts_check:
          _CheckForConflictingIp(self, new_ip, pnode_uuid)

      # release old IP if old network is not None
      if old_ip and old_net_uuid:
        try:
          self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId())
        except errors.AddressPoolError:
          logging.warning("Release IP %s not contained in network %s",
                          old_ip, old_net_obj.name)

    # there are no changes in (ip, network) tuple and old network is not None
    elif (old_net_uuid is not None and
          (req_link is not None or req_mode is not None)):
      raise errors.OpPrereqError("Not allowed to change link or mode of"
                                 " a NIC that is connected to a network",
                                 errors.ECODE_INVAL)

    private.params = new_params
    private.filled = new_filled_params

  def _PreCheckDiskTemplate(self, pnode_info):
    """CheckPrereq checks related to a new disk template."""
    # Arguments are passed to avoid configuration lookups
    pnode_uuid = self.instance.primary_node
    if self.instance.disk_template == self.op.disk_template:
      raise errors.OpPrereqError("Instance already has disk template %s" %
                                 self.instance.disk_template,
                                 errors.ECODE_INVAL)

    if not self.cluster.IsDiskTemplateEnabled(self.op.disk_template):
      raise errors.OpPrereqError("Disk template '%s' is not enabled for this"
                                 " cluster." % self.op.disk_template)

    if (self.instance.disk_template,
        self.op.disk_template) not in self._DISK_CONVERSIONS:
      raise errors.OpPrereqError("Unsupported disk template conversion from"
                                 " %s to %s" % (self.instance.disk_template,
                                                self.op.disk_template),
                                 errors.ECODE_INVAL)
    CheckInstanceState(self, self.instance, INSTANCE_DOWN,
                       msg="cannot change disk template")
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.remote_node_uuid == pnode_uuid:
        raise errors.OpPrereqError("Given new secondary node %s is the same"
                                   " as the primary node of the instance" %
                                   self.op.remote_node, errors.ECODE_STATE)
      CheckNodeOnline(self, self.op.remote_node_uuid)
      CheckNodeNotDrained(self, self.op.remote_node_uuid)
      # FIXME: here we assume that the old instance type is DT_PLAIN
      assert self.instance.disk_template == constants.DT_PLAIN
      disks = [{constants.IDISK_SIZE: d.size,
                constants.IDISK_VG: d.logical_id[0]}
               for d in self.instance.disks]
      required = ComputeDiskSizePerVG(self.op.disk_template, disks)
      CheckNodesFreeDiskPerVG(self, [self.op.remote_node_uuid], required)

      snode_info = self.cfg.GetNodeInfo(self.op.remote_node_uuid)
      snode_group = self.cfg.GetNodeGroup(snode_info.group)
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
                                                              snode_group)
      CheckTargetNodeIPolicy(self, ipolicy, self.instance, snode_info, self.cfg,
                             ignore=self.op.ignore_ipolicy)
      if pnode_info.group != snode_info.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    if not self.op.disk_template in constants.DTS_EXCL_STORAGE:
      # Make sure none of the nodes require exclusive storage
      nodes = [pnode_info]
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        assert snode_info
        nodes.append(snode_info)
      has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
      if compat.any(map(has_es, nodes)):
        errmsg = ("Cannot convert disk template from %s to %s when exclusive"
                  " storage is enabled" % (self.instance.disk_template,
                                           self.op.disk_template))
        raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)

  def _PreCheckDisks(self, ispec):
    """CheckPrereq checks related to disk changes.

    @type ispec: dict
    @param ispec: instance specs to be updated with the new disks

    """
    self.diskparams = self.cfg.GetInstanceDiskParams(self.instance)

    excl_stor = compat.any(
      rpc.GetExclusiveStorageForNodes(self.cfg,
                                      self.instance.all_nodes).values()
      )

    # Check disk modifications. This is done here and not in CheckArguments
    # (as with NICs), because we need to know the instance's disk template
    ver_fn = lambda op, par: self._VerifyDiskModification(op, par, excl_stor)
    if self.instance.disk_template == constants.DT_EXT:
      self._CheckMods("disk", self.op.disks, {}, ver_fn)
    else:
      self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
                      ver_fn)

    self.diskmod = _PrepareContainerMods(self.op.disks, None)

    # Check the validity of the `provider' parameter
    if self.instance.disk_template in constants.DT_EXT:
      for mod in self.diskmod:
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
        if mod[0] == constants.DDM_ADD:
          if ext_provider is None:
            raise errors.OpPrereqError("Instance template is '%s' and parameter"
                                       " '%s' missing, during disk add" %
                                       (constants.DT_EXT,
                                        constants.IDISK_PROVIDER),
                                       errors.ECODE_NOENT)
        elif mod[0] == constants.DDM_MODIFY:
          if ext_provider:
            raise errors.OpPrereqError("Parameter '%s' is invalid during disk"
                                       " modification" %
                                       constants.IDISK_PROVIDER,
                                       errors.ECODE_INVAL)
    else:
      for mod in self.diskmod:
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
        if ext_provider is not None:
          raise errors.OpPrereqError("Parameter '%s' is only valid for"
                                     " instances of type '%s'" %
                                     (constants.IDISK_PROVIDER,
                                      constants.DT_EXT),
                                     errors.ECODE_INVAL)

    if not self.op.wait_for_sync and self.instance.disks_active:
      for mod in self.diskmod:
        if mod[0] == constants.DDM_ADD:
          raise errors.OpPrereqError("Can't add a disk to an instance with"
                                     " activated disks and"
                                     " --no-wait-for-sync given.",
                                     errors.ECODE_INVAL)

    if self.op.disks and self.instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Disk operations not supported for"
                                 " diskless instances", errors.ECODE_INVAL)

    def _PrepareDiskMod(_, disk, params, __):
      disk.name = params.get(constants.IDISK_NAME, None)

    # Verify disk changes (operating on a copy)
    disks = copy.deepcopy(self.instance.disks)
    _ApplyContainerMods("disk", disks, None, self.diskmod, None,
                        _PrepareDiskMod, None)
    utils.ValidateDeviceNames("disk", disks)
    if len(disks) > constants.MAX_DISKS:
      raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
                                 " more" % constants.MAX_DISKS,
                                 errors.ECODE_STATE)
    disk_sizes = [disk.size for disk in self.instance.disks]
    disk_sizes.extend(params["size"] for (op, idx, params, private) in
                      self.diskmod if op == constants.DDM_ADD)
    ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
    ispec[constants.ISPEC_DISK_SIZE] = disk_sizes

    if self.op.offline is not None and self.op.offline:
      CheckInstanceState(self, self.instance, CAN_CHANGE_INSTANCE_OFFLINE,
                         msg="can't change to offline")

  @staticmethod
  def _InstanceCommunicationDDM(cfg, instance_communication, instance):
    """Create a NIC mod that adds or removes the instance
    communication NIC to a running instance.

    The NICS are dynamically created using the Dynamic Device
    Modification (DDM).  This function produces a NIC modification
    (mod) that inserts an additional NIC meant for instance
    communication in or removes an existing instance communication NIC
    from a running instance, using DDM.

    @type cfg: L{config.ConfigWriter}
    @param cfg: cluster configuration

    @type instance_communication: boolean
    @param instance_communication: whether instance communication is
                                   enabled or disabled

    @type instance: L{objects.Instance}
    @param instance: instance to which the NIC mod will be applied to

    @rtype: (L{constants.DDM_ADD}, -1, parameters) or
            (L{constants.DDM_REMOVE}, -1, parameters) or
            L{None}
    @return: DDM mod containing an action to add or remove the NIC, or
             None if nothing needs to be done

    """
    nic_name = _ComputeInstanceCommunicationNIC(instance.name)

    instance_communication_nic = None

    for nic in instance.nics:
      if nic.name == nic_name:
        instance_communication_nic = nic
        break

    if instance_communication and not instance_communication_nic:
      action = constants.DDM_ADD
      params = {constants.INIC_NAME: nic_name,
                constants.INIC_MAC: constants.VALUE_GENERATE,
                constants.INIC_IP: constants.NIC_IP_POOL,
                constants.INIC_NETWORK:
                  cfg.GetInstanceCommunicationNetwork()}
    elif not instance_communication and instance_communication_nic:
      action = constants.DDM_REMOVE
      params = None
    else:
      action = None
      params = None

    if action is not None:
      return (action, -1, params)
    else:
      return None

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the instance list against the existing names.

    """
    assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    self.cluster = self.cfg.GetClusterInfo()
    cluster_hvparams = self.cluster.hvparams[self.instance.hypervisor]

    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    pnode_uuid = self.instance.primary_node

    self.warn = []

    if (self.op.pnode_uuid is not None and self.op.pnode_uuid != pnode_uuid and
        not self.op.force):
      # verify that the instance is not up
      instance_info = self.rpc.call_instance_info(
          pnode_uuid, self.instance.name, self.instance.hypervisor,
          cluster_hvparams)
      if instance_info.fail_msg:
        self.warn.append("Can't get instance runtime information: %s" %
                         instance_info.fail_msg)
      elif instance_info.payload:
        raise errors.OpPrereqError("Instance is still running on %s" %
                                   self.cfg.GetNodeName(pnode_uuid),
                                   errors.ECODE_STATE)

    assert pnode_uuid in self.owned_locks(locking.LEVEL_NODE)
    node_uuids = list(self.instance.all_nodes)
    pnode_info = self.cfg.GetNodeInfo(pnode_uuid)

    #_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
    assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
    group_info = self.cfg.GetNodeGroup(pnode_info.group)

    # dictionary with instance information after the modification
    ispec = {}

    if self.op.hotplug or self.op.hotplug_if_possible:
      result = self.rpc.call_hotplug_supported(self.instance.primary_node,
                                               self.instance)
      if result.fail_msg:
        if self.op.hotplug:
          result.Raise("Hotplug is not possible: %s" % result.fail_msg,
                       prereq=True)
        else:
          self.LogWarning(result.fail_msg)
          self.op.hotplug = False
          self.LogInfo("Modification will take place without hotplugging.")
      else:
        self.op.hotplug = True

    # Prepare NIC modifications
    # add or remove NIC for instance communication
    if self.op.instance_communication is not None:
      mod = self._InstanceCommunicationDDM(self.cfg,
                                           self.op.instance_communication,
                                           self.instance)
      if mod is not None:
        self.op.nics.append(mod)

    self.nicmod = _PrepareContainerMods(self.op.nics, _InstNicModPrivate)

    # OS change
    if self.op.os_name and not self.op.force:
      CheckNodeHasOS(self, self.instance.primary_node, self.op.os_name,
                     self.op.force_variant)
      instance_os = self.op.os_name
    else:
      instance_os = self.instance.os

    assert not (self.op.disk_template and self.op.disks), \
      "Can't modify disk template and apply disk changes at the same time"

    if self.op.disk_template:
      self._PreCheckDiskTemplate(pnode_info)

    self._PreCheckDisks(ispec)

    # hvparams processing
    if self.op.hvparams:
      hv_type = self.instance.hypervisor
      i_hvdict = GetUpdatedParams(self.instance.hvparams, self.op.hvparams)
      utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
      hv_new = self.cluster.SimpleFillHV(hv_type, self.instance.os, i_hvdict)

      # local check
      hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
      CheckHVParams(self, node_uuids, self.instance.hypervisor, hv_new)
      self.hv_proposed = self.hv_new = hv_new # the new actual values
      self.hv_inst = i_hvdict # the new dict (without defaults)
    else:
      self.hv_proposed = self.cluster.SimpleFillHV(self.instance.hypervisor,
                                                   self.instance.os,
                                                   self.instance.hvparams)
      self.hv_new = self.hv_inst = {}

    # beparams processing
    if self.op.beparams:
      i_bedict = GetUpdatedParams(self.instance.beparams, self.op.beparams,
                                  use_none=True)
      objects.UpgradeBeParams(i_bedict)
      utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
      be_new = self.cluster.SimpleFillBE(i_bedict)
      self.be_proposed = self.be_new = be_new # the new actual values
      self.be_inst = i_bedict # the new dict (without defaults)
    else:
      self.be_new = self.be_inst = {}
      self.be_proposed = self.cluster.SimpleFillBE(self.instance.beparams)
    be_old = self.cluster.FillBE(self.instance)

    # CPU param validation -- checking every time a parameter is
    # changed to cover all cases where either CPU mask or vcpus have
    # changed
    if (constants.BE_VCPUS in self.be_proposed and
        constants.HV_CPU_MASK in self.hv_proposed):
      cpu_list = \
        utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
      # Verify mask is consistent with number of vCPUs. Can skip this
      # test if only 1 entry in the CPU mask, which means same mask
      # is applied to all vCPUs.
      if (len(cpu_list) > 1 and
          len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
        raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
                                   " CPU mask [%s]" %
                                   (self.be_proposed[constants.BE_VCPUS],
                                    self.hv_proposed[constants.HV_CPU_MASK]),
                                   errors.ECODE_INVAL)

      # Only perform this test if a new CPU mask is given
      if constants.HV_CPU_MASK in self.hv_new:
        # Calculate the largest CPU number requested
        max_requested_cpu = max(map(max, cpu_list))
        # Check that all of the instance's nodes have enough physical CPUs to
        # satisfy the requested CPU mask
        hvspecs = [(self.instance.hypervisor,
                    self.cfg.GetClusterInfo()
                      .hvparams[self.instance.hypervisor])]
        _CheckNodesPhysicalCPUs(self, self.instance.all_nodes,
                                max_requested_cpu + 1,
                                hvspecs)

    # osparams processing
    if self.op.osparams or self.op.osparams_private:
      public_parms = self.op.osparams or {}
      private_parms = self.op.osparams_private or {}
      dupe_keys = utils.GetRepeatedKeys(public_parms, private_parms)

      if dupe_keys:
        raise errors.OpPrereqError("OS parameters repeated multiple times: %s" %
                                   utils.CommaJoin(dupe_keys))

      self.os_inst = GetUpdatedParams(self.instance.osparams,
                                      public_parms)
      self.os_inst_private = GetUpdatedParams(self.instance.osparams_private,
                                              private_parms)

      CheckOSParams(self, True, node_uuids, instance_os,
                    objects.FillDict(self.os_inst,
                                     self.os_inst_private))

    else:
      self.os_inst = {}
      self.os_inst_private = {}

    #TODO(dynmem): do the appropriate check involving MINMEM
    if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
        be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
      mem_check_list = [pnode_uuid]
      if be_new[constants.BE_AUTO_BALANCE]:
        # either we changed auto_balance to yes or it was from before
        mem_check_list.extend(self.instance.secondary_nodes)
      instance_info = self.rpc.call_instance_info(
          pnode_uuid, self.instance.name, self.instance.hypervisor,
          cluster_hvparams)
      hvspecs = [(self.instance.hypervisor,
                  cluster_hvparams)]
      nodeinfo = self.rpc.call_node_info(mem_check_list, None,
                                         hvspecs)
      pninfo = nodeinfo[pnode_uuid]
      msg = pninfo.fail_msg
      if msg:
        # Assume the primary node is unreachable and go ahead
        self.warn.append("Can't get info from primary node %s: %s" %
                         (self.cfg.GetNodeName(pnode_uuid), msg))
      else:
        (_, _, (pnhvinfo, )) = pninfo.payload
        if not isinstance(pnhvinfo.get("memory_free", None), int):
          self.warn.append("Node data from primary node %s doesn't contain"
                           " free memory information" %
                           self.cfg.GetNodeName(pnode_uuid))
        elif instance_info.fail_msg:
          self.warn.append("Can't get instance runtime information: %s" %
                           instance_info.fail_msg)
        else:
          if instance_info.payload:
            current_mem = int(instance_info.payload["memory"])
          else:
            # Assume instance not running
            # (there is a slight race condition here, but it's not very
            # probable, and we have no other way to check)
            # TODO: Describe race condition
            current_mem = 0
          #TODO(dynmem): do the appropriate check involving MINMEM
          miss_mem = (be_new[constants.BE_MAXMEM] - current_mem -
                      pnhvinfo["memory_free"])
          if miss_mem > 0:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from starting, due to %d MB of memory"
                                       " missing on its primary node" %
                                       miss_mem, errors.ECODE_NORES)

      if be_new[constants.BE_AUTO_BALANCE]:
        for node_uuid, nres in nodeinfo.items():
          if node_uuid not in self.instance.secondary_nodes:
            continue
          nres.Raise("Can't get info from secondary node %s" %
                     self.cfg.GetNodeName(node_uuid), prereq=True,
                     ecode=errors.ECODE_STATE)
          (_, _, (nhvinfo, )) = nres.payload
          if not isinstance(nhvinfo.get("memory_free", None), int):
            raise errors.OpPrereqError("Secondary node %s didn't return free"
                                       " memory information" %
                                       self.cfg.GetNodeName(node_uuid),
                                       errors.ECODE_STATE)
          #TODO(dynmem): do the appropriate check involving MINMEM
          elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from failover to its secondary node"
                                       " %s, due to not enough memory" %
                                       self.cfg.GetNodeName(node_uuid),
                                       errors.ECODE_STATE)

    if self.op.runtime_mem:
      remote_info = self.rpc.call_instance_info(
         self.instance.primary_node, self.instance.name,
         self.instance.hypervisor,
         cluster_hvparams)
      remote_info.Raise("Error checking node %s" %
                        self.cfg.GetNodeName(self.instance.primary_node))
      if not remote_info.payload: # not running already
        raise errors.OpPrereqError("Instance %s is not running" %
                                   self.instance.name, errors.ECODE_STATE)

      current_memory = remote_info.payload["memory"]
      if (not self.op.force and
           (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
            self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
        raise errors.OpPrereqError("Instance %s must have memory between %d"
                                   " and %d MB of memory unless --force is"
                                   " given" %
                                   (self.instance.name,
                                    self.be_proposed[constants.BE_MINMEM],
                                    self.be_proposed[constants.BE_MAXMEM]),
                                   errors.ECODE_INVAL)

      delta = self.op.runtime_mem - current_memory
      if delta > 0:
        CheckNodeFreeMemory(
            self, self.instance.primary_node,
            "ballooning memory for instance %s" % self.instance.name, delta,
            self.instance.hypervisor,
            self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])

    # make self.cluster visible in the functions below
    cluster = self.cluster

    def _PrepareNicCreate(_, params, private):
      self._PrepareNicModification(params, private, None, None,
                                   {}, cluster, pnode_uuid)
      return (None, None)

    def _PrepareNicMod(_, nic, params, private):
      self._PrepareNicModification(params, private, nic.ip, nic.network,
                                   nic.nicparams, cluster, pnode_uuid)
      return None

    def _PrepareNicRemove(_, params, __):
      ip = params.ip
      net = params.network
      if net is not None and ip is not None:
        self.cfg.ReleaseIp(net, ip, self.proc.GetECId())

    # Verify NIC changes (operating on copy)
    nics = [nic.Copy() for nic in self.instance.nics]
    _ApplyContainerMods("NIC", nics, None, self.nicmod,
                        _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
    if len(nics) > constants.MAX_NICS:
      raise errors.OpPrereqError("Instance has too many network interfaces"
                                 " (%d), cannot add more" % constants.MAX_NICS,
                                 errors.ECODE_STATE)

    # Pre-compute NIC changes (necessary to use result in hooks)
    self._nic_chgdesc = []
    if self.nicmod:
      # Operate on copies as this is still in prereq
      nics = [nic.Copy() for nic in self.instance.nics]
      _ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
                          self._CreateNewNic, self._ApplyNicMods,
                          self._RemoveNic)
      # Verify that NIC names are unique and valid
      utils.ValidateDeviceNames("NIC", nics)
      self._new_nics = nics
      ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
    else:
      self._new_nics = None
      ispec[constants.ISPEC_NIC_COUNT] = len(self.instance.nics)

    if not self.op.ignore_ipolicy:
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
                                                              group_info)

      # Fill ispec with backend parameters
      ispec[constants.ISPEC_SPINDLE_USE] = \
        self.be_new.get(constants.BE_SPINDLE_USE, None)
      ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS,
                                                         None)

      # Copy ispec to verify parameters with min/max values separately
      if self.op.disk_template:
        new_disk_template = self.op.disk_template
      else:
        new_disk_template = self.instance.disk_template
      ispec_max = ispec.copy()
      ispec_max[constants.ISPEC_MEM_SIZE] = \
        self.be_new.get(constants.BE_MAXMEM, None)
      res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,
                                                     new_disk_template)
      ispec_min = ispec.copy()
      ispec_min[constants.ISPEC_MEM_SIZE] = \
        self.be_new.get(constants.BE_MINMEM, None)
      res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,
                                                     new_disk_template)

      if (res_max or res_min):
        # FIXME: Improve error message by including information about whether
        # the upper or lower limit of the parameter fails the ipolicy.
        msg = ("Instance allocation to group %s (%s) violates policy: %s" %
               (group_info, group_info.name,
                utils.CommaJoin(set(res_max + res_min))))
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

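  # Overview of the plain->drbd conversion below: new DRBD disk objects are
  # generated, the missing meta/data volumes are created on both nodes, the
  # existing LVs are renamed to become the DRBD data devices, the DRBD
  # devices are assembled on top of them, the configuration is updated and
  # finally the new mirrors are left to sync.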
  def _ConvertPlainToDrbd(self, feedback_fn):
    """Converts an instance from plain to drbd.

    """
    feedback_fn("Converting template to drbd")
    pnode_uuid = self.instance.primary_node
    snode_uuid = self.op.remote_node_uuid

    assert self.instance.disk_template == constants.DT_PLAIN

    # create a fake disk info for _GenerateDiskTemplate
    disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
                  constants.IDISK_VG: d.logical_id[0],
                  constants.IDISK_NAME: d.name}
                 for d in self.instance.disks]
    new_disks = GenerateDiskTemplate(self, self.op.disk_template,
                                     self.instance.uuid, pnode_uuid,
                                     [snode_uuid], disk_info, None, None, 0,
                                     feedback_fn, self.diskparams)
    anno_disks = rpc.AnnotateDiskParams(new_disks, self.diskparams)
    p_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, pnode_uuid)
    s_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, snode_uuid)
    info = GetInstanceInfoText(self.instance)
    feedback_fn("Creating additional volumes...")
    # first, create the missing data and meta devices
    for disk in anno_disks:
      # unfortunately this is... not too nice
      CreateSingleBlockDev(self, pnode_uuid, self.instance, disk.children[1],
                           info, True, p_excl_stor)
      for child in disk.children:
        CreateSingleBlockDev(self, snode_uuid, self.instance, child, info, True,
                             s_excl_stor)
    # at this stage, all new LVs have been created, we can rename the
    # old ones
    feedback_fn("Renaming original volumes...")
    rename_list = [(o, n.children[0].logical_id)
                   for (o, n) in zip(self.instance.disks, new_disks)]
    result = self.rpc.call_blockdev_rename(pnode_uuid, rename_list)
    result.Raise("Failed to rename original LVs")

    feedback_fn("Initializing DRBD devices...")
    # all child devices are in place, we can now create the DRBD devices
    try:
      for disk in anno_disks:
        for (node_uuid, excl_stor) in [(pnode_uuid, p_excl_stor),
                                       (snode_uuid, s_excl_stor)]:
          f_create = node_uuid == pnode_uuid
          CreateSingleBlockDev(self, node_uuid, self.instance, disk, info,
                               f_create, excl_stor)
    except errors.GenericError, e:
      feedback_fn("Initializing of DRBD devices failed;"
                  " renaming back original volumes...")
      rename_back_list = [(n.children[0], o.logical_id)
                          for (n, o) in zip(new_disks, self.instance.disks)]
      result = self.rpc.call_blockdev_rename(pnode_uuid, rename_back_list)
      result.Raise("Failed to rename LVs back after error %s" % str(e))
      raise

    # at this point, the instance has been modified
    self.instance.disk_template = constants.DT_DRBD8
    self.instance.disks = new_disks
    self.cfg.Update(self.instance, feedback_fn)

    # Release node locks while waiting for sync
    ReleaseLocks(self, locking.LEVEL_NODE)

    # disks are created, waiting for sync
    disk_abort = not WaitForSync(self, self.instance,
                                 oneshot=not self.op.wait_for_sync)
    if disk_abort:
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance, please cleanup manually")

    # Node resource locks will be released by caller

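  # Overview of the drbd->plain conversion below: each DRBD disk's data LV
  # child becomes the new plain disk, the DRBD TCP ports are returned to the
  # pool, the configuration is updated, and the now unneeded volumes (all
  # volumes on the secondary, the metadata LVs on the primary) are removed.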
  def _ConvertDrbdToPlain(self, feedback_fn):
    """Converts an instance from drbd to plain.

    """
    assert len(self.instance.secondary_nodes) == 1
    assert self.instance.disk_template == constants.DT_DRBD8

    pnode_uuid = self.instance.primary_node
    snode_uuid = self.instance.secondary_nodes[0]
    feedback_fn("Converting template to plain")

    old_disks = AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
    new_disks = [d.children[0] for d in self.instance.disks]

    # copy over size, mode and name
    for parent, child in zip(old_disks, new_disks):
      child.size = parent.size
      child.mode = parent.mode
      child.name = parent.name

    # this is a DRBD disk, return its port to the pool
    # NOTE: this must be done right before the call to cfg.Update!
    for disk in old_disks:
      tcp_port = disk.logical_id[2]
      self.cfg.AddTcpUdpPort(tcp_port)

    # update instance structure
    self.instance.disks = new_disks
    self.instance.disk_template = constants.DT_PLAIN
    _UpdateIvNames(0, self.instance.disks)
    self.cfg.Update(self.instance, feedback_fn)

    # Release locks in case removing disks takes a while
    ReleaseLocks(self, locking.LEVEL_NODE)

    feedback_fn("Removing volumes on the secondary node...")
    for disk in old_disks:
      result = self.rpc.call_blockdev_remove(snode_uuid, (disk, self.instance))
      result.Warn("Could not remove block device %s on node %s,"
                  " continuing anyway" %
                  (disk.iv_name, self.cfg.GetNodeName(snode_uuid)),
                  self.LogWarning)

    feedback_fn("Removing unneeded volumes on the primary node...")
    for idx, disk in enumerate(old_disks):
      meta = disk.children[1]
      result = self.rpc.call_blockdev_remove(pnode_uuid, (meta, self.instance))
      result.Warn("Could not remove metadata for disk %d on node %s,"
                  " continuing anyway" %
                  (idx, self.cfg.GetNodeName(pnode_uuid)),
                  self.LogWarning)

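  # _HotplugDevice asks the instance's primary node to hot-add, hot-remove or
  # hot-modify a device via the hotplug_device RPC; failures only produce a
  # warning, and the returned short status string is recorded in the change
  # descriptions of the calling methods.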
  def _HotplugDevice(self, action, dev_type, device, extra, seq):
    self.LogInfo("Trying to hotplug device...")
    msg = "hotplug:"
    result = self.rpc.call_hotplug_device(self.instance.primary_node,
                                          self.instance, action, dev_type,
                                          (device, self.instance),
                                          extra, seq)
    if result.fail_msg:
      self.LogWarning("Could not hotplug device: %s" % result.fail_msg)
      self.LogInfo("Continuing execution..")
      msg += "failed"
    else:
      self.LogInfo("Hotplug done.")
      msg += "done"
    return msg

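  # _CreateNewDisk generates the new disk object, creates it on the relevant
  # node(s), optionally wipes it (prealloc_wipe_disks) and, if hotplug was
  # requested, assembles it on the primary node and hot-adds it to the
  # running instance.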
  def _CreateNewDisk(self, idx, params, _):
    """Creates a new disk.

    """
    # add a new disk
    if self.instance.disk_template in constants.DTS_FILEBASED:
      (file_driver, file_path) = self.instance.disks[0].logical_id
      file_path = os.path.dirname(file_path)
    else:
      file_driver = file_path = None

    disk = \
      GenerateDiskTemplate(self, self.instance.disk_template,
                           self.instance.uuid, self.instance.primary_node,
                           self.instance.secondary_nodes, [params], file_path,
                           file_driver, idx, self.Log, self.diskparams)[0]

    new_disks = CreateDisks(self, self.instance, disks=[disk])

    if self.cluster.prealloc_wipe_disks:
      # Wipe new disk
      WipeOrCleanupDisks(self, self.instance,
                         disks=[(idx, disk, 0)],
                         cleanup=new_disks)

    changes = [
      ("disk/%d" % idx,
       "add:size=%s,mode=%s" % (disk.size, disk.mode)),
      ]
    if self.op.hotplug:
      result = self.rpc.call_blockdev_assemble(self.instance.primary_node,
                                               (disk, self.instance),
                                               self.instance.name, True, idx)
      if result.fail_msg:
        changes.append(("disk/%d" % idx, "assemble:failed"))
        self.LogWarning("Can't assemble newly created disk %d: %s",
                        idx, result.fail_msg)
      else:
        _, link_name = result.payload
        msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
                                  constants.HOTPLUG_TARGET_DISK,
                                  disk, link_name, idx)
        changes.append(("disk/%d" % idx, msg))

    return (disk, changes)

  def _PostAddDisk(self, _, disk):
    if not WaitForSync(self, self.instance, disks=[disk],
                       oneshot=not self.op.wait_for_sync):
      raise errors.OpExecError("Failed to sync disks of %s" %
                               self.instance.name)

    # the disk is active at this point, so deactivate it if the instance disks
    # are supposed to be inactive
    if not self.instance.disks_active:
      ShutdownInstanceDisks(self, self.instance, disks=[disk])

  def _ModifyDisk(self, idx, disk, params, _):
    """Modifies a disk.

    """
    changes = []
    if constants.IDISK_MODE in params:
      disk.mode = params.get(constants.IDISK_MODE)
      changes.append(("disk.mode/%d" % idx, disk.mode))

    if constants.IDISK_NAME in params:
      disk.name = params.get(constants.IDISK_NAME)
      changes.append(("disk.name/%d" % idx, disk.name))

    # Modify arbitrary params in case instance template is ext
    for key, value in params.iteritems():
      if (key not in constants.MODIFIABLE_IDISK_PARAMS and
          self.instance.disk_template == constants.DT_EXT):
        # stolen from GetUpdatedParams: default means reset/delete
        if value.lower() == constants.VALUE_DEFAULT:
          try:
            del disk.params[key]
          except KeyError:
            pass
        else:
          disk.params[key] = value
        changes.append(("disk.params:%s/%d" % (key, idx), value))

    return changes

  def _RemoveDisk(self, idx, root, _):
    """Removes a disk.

    """
    hotmsg = ""
    if self.op.hotplug:
      hotmsg = self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
                                   constants.HOTPLUG_TARGET_DISK,
                                   root, None, idx)
      ShutdownInstanceDisks(self, self.instance, [root])

    (anno_disk,) = AnnotateDiskParams(self.instance, [root], self.cfg)
    for node_uuid, disk in anno_disk.ComputeNodeTree(
                             self.instance.primary_node):
      msg = self.rpc.call_blockdev_remove(node_uuid, (disk, self.instance)) \
              .fail_msg
      if msg:
        self.LogWarning("Could not remove disk/%d on node '%s': %s,"
                        " continuing anyway", idx,
                        self.cfg.GetNodeName(node_uuid), msg)

    # if this is a DRBD disk, return its port to the pool
    if root.dev_type in constants.DTS_DRBD:
      self.cfg.AddTcpUdpPort(root.logical_id[2])

    return hotmsg

  def _CreateNewNic(self, idx, params, private):
    """Creates data structure for a new network interface.

    """
    mac = params[constants.INIC_MAC]
    ip = params.get(constants.INIC_IP, None)
    net = params.get(constants.INIC_NETWORK, None)
    name = params.get(constants.INIC_NAME, None)
    net_uuid = self.cfg.LookupNetwork(net)
    #TODO: not private.filled?? can a nic have no nicparams??
    nicparams = private.filled
    nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name,
                       nicparams=nicparams)
    nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())

    changes = [
      ("nic.%d" % idx,
       "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
       (mac, ip, private.filled[constants.NIC_MODE],
       private.filled[constants.NIC_LINK], net)),
      ]

    if self.op.hotplug:
      msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
                                constants.HOTPLUG_TARGET_NIC,
                                nobj, None, idx)
      changes.append(("nic.%d" % idx, msg))

    return (nobj, changes)

  def _ApplyNicMods(self, idx, nic, params, private):
    """Modifies a network interface.

    """
    changes = []

    for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:
      if key in params:
        changes.append(("nic.%s/%d" % (key, idx), params[key]))
        setattr(nic, key, params[key])

    new_net = params.get(constants.INIC_NETWORK, nic.network)
    new_net_uuid = self.cfg.LookupNetwork(new_net)
    if new_net_uuid != nic.network:
      changes.append(("nic.network/%d" % idx, new_net))
      nic.network = new_net_uuid

    if private.filled:
      nic.nicparams = private.filled

      for (key, val) in nic.nicparams.items():
        changes.append(("nic.%s/%d" % (key, idx), val))

    if self.op.hotplug:
      msg = self._HotplugDevice(constants.HOTPLUG_ACTION_MODIFY,
                                constants.HOTPLUG_TARGET_NIC,
                                nic, None, idx)
      changes.append(("nic/%d" % idx, msg))

    return changes

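  # Removing a NIC needs no per-node cleanup: at most a hot-unplug is issued
  # here, while the NIC object itself is dropped from the instance's NIC list
  # (see the _new_nics handling in Exec).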
  def _RemoveNic(self, idx, nic, _):
    if self.op.hotplug:
      return self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
                                 constants.HOTPLUG_TARGET_NIC,
                                 nic, None, idx)

  def Exec(self, feedback_fn):
    """Modifies an instance.

    All parameters take effect only at the next restart of the instance.

    """
    # Process here the warnings from CheckPrereq, as we don't have a
    # feedback_fn there.
    # TODO: Replace with self.LogWarning
    for warn in self.warn:
      feedback_fn("WARNING: %s" % warn)

    assert ((self.op.disk_template is None) ^
            bool(self.owned_locks(locking.LEVEL_NODE_RES))), \
      "Not owning any node resource locks"

    result = []

    # New primary node
    if self.op.pnode_uuid:
      self.instance.primary_node = self.op.pnode_uuid

    # runtime memory
    if self.op.runtime_mem:
      rpcres = self.rpc.call_instance_balloon_memory(self.instance.primary_node,
                                                     self.instance,
                                                     self.op.runtime_mem)
      rpcres.Raise("Cannot modify instance runtime memory")
      result.append(("runtime_memory", self.op.runtime_mem))

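    # _ApplyContainerMods dispatches every entry of self.diskmod to the
    # _CreateNewDisk/_ModifyDisk/_RemoveDisk callbacks above and collects
    # their change descriptions in "result"; _UpdateIvNames then renumbers
    # the remaining disks' iv_names starting from index 0.
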
    # Apply disk changes
    _ApplyContainerMods("disk", self.instance.disks, result, self.diskmod,
                        self._CreateNewDisk, self._ModifyDisk,
                        self._RemoveDisk, post_add_fn=self._PostAddDisk)
    _UpdateIvNames(0, self.instance.disks)

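    # Optional disk template conversion: in debug builds double-check that
    # all required node locks are held, shut down the instance's disks, run
    # the matching helper from _DISK_CONVERSIONS and release any reserved
    # DRBD minors if the conversion fails.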
    if self.op.disk_template:
      if __debug__:
        check_nodes = set(self.instance.all_nodes)
        if self.op.remote_node_uuid:
          check_nodes.add(self.op.remote_node_uuid)
        for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
          owned = self.owned_locks(level)
          assert not (check_nodes - owned), \
            ("Not owning the correct locks, owning %r, expected at least %r" %
             (owned, check_nodes))

      r_shut = ShutdownInstanceDisks(self, self.instance)
      if not r_shut:
        raise errors.OpExecError("Cannot shutdown instance disks, unable to"
                                 " proceed with disk template conversion")
      mode = (self.instance.disk_template, self.op.disk_template)
      try:
        self._DISK_CONVERSIONS[mode](self, feedback_fn)
      except:
        self.cfg.ReleaseDRBDMinors(self.instance.uuid)
        raise
      result.append(("disk_template", self.op.disk_template))

      assert self.instance.disk_template == self.op.disk_template, \
        ("Expected disk template '%s', found '%s'" %
         (self.op.disk_template, self.instance.disk_template))

    # Release node and resource locks if there are any (they might already have
    # been released during disk conversion)
    ReleaseLocks(self, locking.LEVEL_NODE)
    ReleaseLocks(self, locking.LEVEL_NODE_RES)

    # Apply NIC changes
    if self._new_nics is not None:
      self.instance.nics = self._new_nics
      result.extend(self._nic_chgdesc)

    # hvparams changes
    if self.op.hvparams:
      self.instance.hvparams = self.hv_inst
      for key, val in self.op.hvparams.iteritems():
        result.append(("hv/%s" % key, val))

    # beparams changes
    if self.op.beparams:
      self.instance.beparams = self.be_inst
      for key, val in self.op.beparams.iteritems():
        result.append(("be/%s" % key, val))

    # OS change
    if self.op.os_name:
      self.instance.os = self.op.os_name

    # osparams changes
    if self.op.osparams:
      self.instance.osparams = self.os_inst
      for key, val in self.op.osparams.iteritems():
        result.append(("os/%s" % key, val))

    if self.op.osparams_private:
      self.instance.osparams_private = self.os_inst_private
      for key, val in self.op.osparams_private.iteritems():
        # Show the Private(...) blurb.
        result.append(("os_private/%s" % key, repr(val)))

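    # self.op.offline is tri-state: None leaves the admin state untouched,
    # True marks the instance offline, False brings it back online but
    # administratively down.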
    if self.op.offline is None:
      # Ignore
      pass
    elif self.op.offline:
      # Mark instance as offline
      self.cfg.MarkInstanceOffline(self.instance.uuid)
      result.append(("admin_state", constants.ADMINST_OFFLINE))
    else:
      # Mark instance as online, but stopped
      self.cfg.MarkInstanceDown(self.instance.uuid)
      result.append(("admin_state", constants.ADMINST_DOWN))

    self.cfg.Update(self.instance, feedback_fn, self.proc.GetECId())

    assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
                self.owned_locks(locking.LEVEL_NODE)), \
      "All node locks should have been released by now"

    return result

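  # Supported in-place disk template conversions; Exec looks up the helper
  # by the (current, requested) template pair, e.g.
  # (constants.DT_PLAIN, constants.DT_DRBD8) selects _ConvertPlainToDrbd.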
  _DISK_CONVERSIONS = {
    (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
    (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
    }


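# LUInstanceChangeGroup moves an instance to a different node group: it only
# asks an iallocator for a solution and returns the resulting jobs; the actual
# moves are carried out by those jobs.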
class LUInstanceChangeGroup(LogicalUnit):
  HPATH = "instance-change-group"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self.share_locks = ShareAll()

    self.needed_locks = {
      locking.LEVEL_NODEGROUP: [],
      locking.LEVEL_NODE: [],
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
      }

    self._ExpandAndLockInstance()

    if self.op.target_groups:
      self.req_target_uuids = map(self.cfg.LookupNodeGroup,
                                  self.op.target_groups)
    else:
      self.req_target_uuids = None

    self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)

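  # Locks are declared in two rounds: node groups first (the instance's own
  # groups are added optimistically and re-verified in CheckPrereq), then the
  # nodes of all potential target groups.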
  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]

      if self.req_target_uuids:
        lock_groups = set(self.req_target_uuids)

        # Lock all groups used by instance optimistically; this requires going
        # via the node before it's locked, requiring verification later on
        instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
        lock_groups.update(instance_groups)
      else:
        # No target groups, need to lock all of them
        lock_groups = locking.ALL_SET

      self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups

    elif level == locking.LEVEL_NODE:
      if self.req_target_uuids:
        # Lock all nodes used by instances
        self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
        self._LockInstancesNodes()

        # Lock all nodes in all potential target groups
        lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
                       self.cfg.GetInstanceNodeGroups(self.op.instance_uuid))
        member_nodes = [node_uuid
                        for group in lock_groups
                        for node_uuid in self.cfg.GetNodeGroup(group).members]
        self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
      else:
        # Lock all nodes as all groups are potential targets
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

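  # CheckPrereq re-verifies the optimistic locking assumptions and computes
  # self.target_uuids: either the explicitly requested groups or all owned
  # groups the instance is not already using.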
  def CheckPrereq(self):
    owned_instance_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))

    assert (self.req_target_uuids is None or
            owned_groups.issuperset(self.req_target_uuids))
    assert owned_instance_names == set([self.op.instance_name])

    # Get instance information
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)

    # Check if node groups for locked instance are still correct
    assert owned_nodes.issuperset(self.instance.all_nodes), \
      ("Instance %s's nodes changed while we kept the lock" %
       self.op.instance_name)

    inst_groups = CheckInstanceNodeGroups(self.cfg, self.op.instance_uuid,
                                          owned_groups)

    if self.req_target_uuids:
      # User requested specific target groups
      self.target_uuids = frozenset(self.req_target_uuids)
    else:
      # All groups except those used by the instance are potential targets
      self.target_uuids = owned_groups - inst_groups

    conflicting_groups = self.target_uuids & inst_groups
    if conflicting_groups:
      raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
                                 " used by the instance '%s'" %
                                 (utils.CommaJoin(conflicting_groups),
                                  self.op.instance_name),
                                 errors.ECODE_INVAL)

    if not self.target_uuids:
      raise errors.OpPrereqError("There are no possible target groups",
                                 errors.ECODE_INVAL)

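  # The hooks environment exposes the chosen target groups as a single
  # space-separated variable, e.g. TARGET_GROUPS="uuid-a uuid-b" (the values
  # shown are illustrative).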
  def BuildHooksEnv(self):
    """Build hooks env.

    """
    assert self.target_uuids

    env = {
      "TARGET_GROUPS": " ".join(self.target_uuids),
      }

    env.update(BuildInstanceHookEnvByObject(self, self.instance))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()
    return ([mn], [mn])

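  # Exec only computes the solution: it builds an IAReqGroupChange request,
  # runs the configured iallocator and returns the jobs loaded through
  # LoadNodeEvacResult wrapped in a ResultWithJobs, to be executed separately.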
  def Exec(self, feedback_fn):
    instances = list(self.owned_locks(locking.LEVEL_INSTANCE))

    assert instances == [self.op.instance_name], "Instance not locked"

    req = iallocator.IAReqGroupChange(instances=instances,
                                      target_groups=list(self.target_uuids))
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute solution for changing group of"
                                 " instance '%s' using iallocator '%s': %s" %
                                 (self.op.instance_name, self.op.iallocator,
                                  ial.info), errors.ECODE_NORES)

    jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False)

    self.LogInfo("Iallocator returned %s job(s) for changing group of"
                 " instance '%s'", len(jobs), self.op.instance_name)

    return ResultWithJobs(jobs)