#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Logical units dealing with instances."""

import OpenSSL
import copy
import logging
import os

from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import ht
from ganeti import hypervisor
from ganeti import locking
from ganeti.masterd import iallocator
from ganeti import masterd
from ganeti import netutils
from ganeti import objects
from ganeti import pathutils
from ganeti import rpc
from ganeti import utils

from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs

from ganeti.cmdlib.common import INSTANCE_DOWN, \
  INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, CheckNodeOnline, \
  ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
  LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
  IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
  AnnotateDiskParams, GetUpdatedParams, ExpandInstanceUuidAndName, \
  ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeUuidAndName, \
  CheckDiskTemplateEnabled
from ganeti.cmdlib.instance_storage import CreateDisks, \
  CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, WaitForSync, \
  IsExclusiveStorageEnabledNodeUuid, CreateSingleBlockDev, ComputeDisks, \
  CheckRADOSFreeSpace, ComputeDiskSizePerVG, GenerateDiskTemplate, \
  StartInstanceDisks, ShutdownInstanceDisks, AssembleInstanceDisks, \
  CheckSpindlesExclusiveStorage
from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
  GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
  NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
  ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
  GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
  CheckInstanceBridgesExist, CheckNicsBridgesExist, CheckNodeHasOS

import ganeti.masterd.instance


#: Type description for changes as returned by L{_ApplyContainerMods}'s
#: callbacks
_TApplyContModsCbChanges = \
  ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
    ht.TNonEmptyString,
    ht.TAny,
    ])))
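
# Per the type above, a callback returns either None or a list of
# (description, value) 2-tuples, for example [("mode", "bridged"),
# ("link", "br0")] (illustrative values only).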


def _CheckHostnameSane(lu, name):
  """Ensures that a given hostname resolves to a 'sane' name.

  The given name is required to be a prefix of the resolved hostname,
  to prevent accidental mismatches.

  @param lu: the logical unit on behalf of which we're checking
  @param name: the name we should resolve and check
  @return: the resolved hostname object

  """
  hostname = netutils.GetHostname(name=name)
  if hostname.name != name:
    lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name)
  if not utils.MatchNameComponent(name, [hostname.name]):
    raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
                                " same as given hostname '%s'") %
                               (hostname.name, name), errors.ECODE_INVAL)
  return hostname
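
# For example (illustrative names): "inst1" resolving to "inst1.example.com"
# passes the prefix check above, while "inst1" resolving to
# "node7.example.com" raises OpPrereqError.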


def _CheckOpportunisticLocking(op):
  """Generate error if opportunistic locking is not possible.

  """
  if op.opportunistic_locking and not op.iallocator:
    raise errors.OpPrereqError("Opportunistic locking is only available in"
                               " combination with an instance allocator",
                               errors.ECODE_INVAL)


def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_name_whitelist):
  """Wrapper around IAReqInstanceAlloc.

  @param op: The instance opcode
  @param disks: The computed disks
  @param nics: The computed nics
  @param beparams: The fully filled beparams
  @param node_name_whitelist: List of nodes which should appear as online to the
    allocator (unless the node is already marked offline)

  @returns: A filled L{iallocator.IAReqInstanceAlloc}

  """
  spindle_use = beparams[constants.BE_SPINDLE_USE]
  return iallocator.IAReqInstanceAlloc(name=op.instance_name,
                                       disk_template=op.disk_template,
                                       tags=op.tags,
                                       os=op.os_type,
                                       vcpus=beparams[constants.BE_VCPUS],
                                       memory=beparams[constants.BE_MAXMEM],
                                       spindle_use=spindle_use,
                                       disks=disks,
                                       nics=[n.ToDict() for n in nics],
                                       hypervisor=op.hypervisor,
                                       node_whitelist=node_name_whitelist)


def _ComputeFullBeParams(op, cluster):
  """Computes the full beparams.

  @param op: The instance opcode
  @param cluster: The cluster config object

  @return: The fully filled beparams

  """
  default_beparams = cluster.beparams[constants.PP_DEFAULT]
  for param, value in op.beparams.iteritems():
    if value == constants.VALUE_AUTO:
      op.beparams[param] = default_beparams[param]
  objects.UpgradeBeParams(op.beparams)
  utils.ForceDictType(op.beparams, constants.BES_PARAMETER_TYPES)
  return cluster.SimpleFillBE(op.beparams)
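
# E.g. an opcode carrying beparams={"vcpus": "auto"} (illustrative) gets the
# cluster default substituted for "auto" before the dict is upgraded,
# type-checked and merged with the cluster-level defaults.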


def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
  """Computes the nics.

  @param op: The instance opcode
  @param cluster: Cluster configuration object
  @param default_ip: The default ip to assign
  @param cfg: An instance of the configuration object
  @param ec_id: Execution context ID

  @returns: The built-up nics

  """
  nics = []
  for nic in op.nics:
    nic_mode_req = nic.get(constants.INIC_MODE, None)
    nic_mode = nic_mode_req
    if nic_mode is None or nic_mode == constants.VALUE_AUTO:
      nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]

    net = nic.get(constants.INIC_NETWORK, None)
    link = nic.get(constants.NIC_LINK, None)
    ip = nic.get(constants.INIC_IP, None)
    vlan = nic.get(constants.INIC_VLAN, None)

    if net is None or net.lower() == constants.VALUE_NONE:
      net = None
    else:
      if nic_mode_req is not None or link is not None:
        raise errors.OpPrereqError("If network is given, no mode or link"
                                   " is allowed to be passed",
                                   errors.ECODE_INVAL)

    if vlan is not None and nic_mode != constants.NIC_MODE_OVS:
      raise errors.OpPrereqError("VLAN is given, but network mode is not"
                                 " openvswitch", errors.ECODE_INVAL)

    # ip validity checks
    if ip is None or ip.lower() == constants.VALUE_NONE:
      nic_ip = None
    elif ip.lower() == constants.VALUE_AUTO:
      if not op.name_check:
        raise errors.OpPrereqError("IP address set to auto but name checks"
                                   " have been skipped",
                                   errors.ECODE_INVAL)
      nic_ip = default_ip
    else:
      # We defer pool operations until later, so that the iallocator has
      # filled in the instance's node(s)
      if ip.lower() == constants.NIC_IP_POOL:
        if net is None:
          raise errors.OpPrereqError("if ip=pool, parameter network"
                                     " must be passed too",
                                     errors.ECODE_INVAL)

      elif not netutils.IPAddress.IsValid(ip):
        raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                   errors.ECODE_INVAL)

      nic_ip = ip

    # TODO: check the ip address for uniqueness
    if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
      raise errors.OpPrereqError("Routed nic mode requires an ip address",
                                 errors.ECODE_INVAL)

    # MAC address verification
    mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
    if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
      mac = utils.NormalizeAndValidateMac(mac)

      try:
        # TODO: We need to factor this out
        cfg.ReserveMAC(mac, ec_id)
      except errors.ReservationError:
        raise errors.OpPrereqError("MAC address %s already in use"
                                   " in cluster" % mac,
                                   errors.ECODE_NOTUNIQUE)

    # Build nic parameters
    nicparams = {}
    if nic_mode_req:
      nicparams[constants.NIC_MODE] = nic_mode
    if link:
      nicparams[constants.NIC_LINK] = link
    if vlan:
      nicparams[constants.NIC_VLAN] = vlan

    check_params = cluster.SimpleFillNIC(nicparams)
    objects.NIC.CheckParameterSyntax(check_params)
    net_uuid = cfg.LookupNetwork(net)
    name = nic.get(constants.INIC_NAME, None)
    if name is not None and name.lower() == constants.VALUE_NONE:
      name = None
    nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name,
                          network=net_uuid, nicparams=nicparams)
    nic_obj.uuid = cfg.GenerateUniqueID(ec_id)
    nics.append(nic_obj)

  return nics
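
# Note that a MAC of "auto"/"generate" is deliberately left unresolved here;
# the final address is generated later (see the mac address generation step
# in LUInstanceCreate.CheckPrereq) so the allocator and hooks see real MACs.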


def _CheckForConflictingIp(lu, ip, node_uuid):
  """In case of conflicting IP address raise error.

  @type ip: string
  @param ip: IP address
  @type node_uuid: string
  @param node_uuid: node UUID

  """
  (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node_uuid)
  if conf_net is not None:
    raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
                                " network %s, but the target NIC does not." %
                                (ip, conf_net)),
                               errors.ECODE_STATE)

  return (None, None)


def _ComputeIPolicyInstanceSpecViolation(
  ipolicy, instance_spec, disk_template,
  _compute_fn=ComputeIPolicySpecViolation):
  """Compute if instance specs meet the specs of ipolicy.

  @type ipolicy: dict
  @param ipolicy: The ipolicy to verify against
  @type instance_spec: dict
  @param instance_spec: The instance spec to verify
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{ComputeIPolicySpecViolation}

  """
  mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
  cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
  disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
  disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
  nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
  spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)

  return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
                     disk_sizes, spindle_use, disk_template)


def _CheckOSVariant(os_obj, name):
  """Check whether an OS name conforms to the os variants specification.

  @type os_obj: L{objects.OS}
  @param os_obj: OS object to check
  @type name: string
  @param name: OS name passed by the user, to check for validity

  """
  variant = objects.OS.GetVariant(name)
  if not os_obj.supported_variants:
    if variant:
      raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
                                 " passed)" % (os_obj.name, variant),
                                 errors.ECODE_INVAL)
    return
  if not variant:
    raise errors.OpPrereqError("OS name must include a variant",
                               errors.ECODE_INVAL)

  if variant not in os_obj.supported_variants:
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)


class LUInstanceCreate(LogicalUnit):
  """Create an instance.

  """
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def _CheckDiskTemplateValid(self):
    """Checks validity of disk template.

    """
    cluster = self.cfg.GetClusterInfo()
    if self.op.disk_template is None:
      # FIXME: It would be better to take the default disk template from the
      # ipolicy, but for the ipolicy we need the primary node, which we get from
      # the iallocator, which wants the disk template as input. To solve this
      # chicken-and-egg problem, it should be possible to specify just a node
      # group from the iallocator and take the ipolicy from that.
      self.op.disk_template = cluster.enabled_disk_templates[0]
    CheckDiskTemplateEnabled(cluster, self.op.disk_template)

  def _CheckDiskArguments(self):
    """Checks validity of disk-related arguments.

    """
    # check that disks' names are unique and valid
    utils.ValidateDeviceNames("disk", self.op.disks)

    self._CheckDiskTemplateValid()

    # check disks' parameter names and consistent adopt/no-adopt strategy
    has_adopt = has_no_adopt = False
    for disk in self.op.disks:
      if self.op.disk_template != constants.DT_EXT:
        utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
      if constants.IDISK_ADOPT in disk:
        has_adopt = True
      else:
        has_no_adopt = True
    if has_adopt and has_no_adopt:
      raise errors.OpPrereqError("Either all disks are adopted or none is",
                                 errors.ECODE_INVAL)
    if has_adopt:
      if self.op.disk_template not in constants.DTS_MAY_ADOPT:
        raise errors.OpPrereqError("Disk adoption is not supported for the"
                                   " '%s' disk template" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)
      if self.op.iallocator is not None:
        raise errors.OpPrereqError("Disk adoption not allowed with an"
                                   " iallocator script", errors.ECODE_INVAL)
      if self.op.mode == constants.INSTANCE_IMPORT:
        raise errors.OpPrereqError("Disk adoption not allowed for"
                                   " instance import", errors.ECODE_INVAL)
    else:
      if self.op.disk_template in constants.DTS_MUST_ADOPT:
        raise errors.OpPrereqError("Disk template %s requires disk adoption,"
                                   " but no 'adopt' parameter given" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)

    self.adopt_disks = has_adopt

  def _CheckVLANArguments(self):
    """Check validity of VLANs if given

    """
    for nic in self.op.nics:
      vlan = nic.get(constants.INIC_VLAN, None)
      if vlan:
        if vlan[0] == ".":
          # vlan starting with dot means single untagged vlan,
          # might be followed by trunk (:)
          if not vlan[1:].isdigit():
            vlanlist = vlan[1:].split(':')
            for vl in vlanlist:
              if not vl.isdigit():
                raise errors.OpPrereqError("Specified VLAN parameter is "
                                           "invalid : %s" % vlan,
                                           errors.ECODE_INVAL)
        elif vlan[0] == ":":
          # Trunk - tagged only
          vlanlist = vlan[1:].split(':')
          for vl in vlanlist:
            if not vl.isdigit():
              raise errors.OpPrereqError("Specified VLAN parameter is invalid"
                                         " : %s" % vlan, errors.ECODE_INVAL)
        elif vlan.isdigit():
          # This is the simplest case. No dots, only single digit
          # -> Create untagged access port, dot needs to be added
          nic[constants.INIC_VLAN] = "." + vlan
        else:
          raise errors.OpPrereqError("Specified VLAN parameter is invalid"
                                     " : %s" % vlan, errors.ECODE_INVAL)

  def CheckArguments(self):
    """Check arguments.

    """
    # do not require name_check to ease forward/backward compatibility
    # for tools
    if self.op.no_install and self.op.start:
      self.LogInfo("No-installation mode selected, disabling startup")
      self.op.start = False
    # validate/normalize the instance name
    self.op.instance_name = \
      netutils.Hostname.GetNormalizedName(self.op.instance_name)

    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("Cannot do IP address check without a name"
                                 " check", errors.ECODE_INVAL)

    # check nics' parameter names
    for nic in self.op.nics:
      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
    # check that NICs' names are unique and valid
    utils.ValidateDeviceNames("NIC", self.op.nics)

    self._CheckVLANArguments()

    self._CheckDiskArguments()
    assert self.op.disk_template is not None

    # instance name verification
    if self.op.name_check:
      self.hostname = _CheckHostnameSane(self, self.op.instance_name)
      self.op.instance_name = self.hostname.name
      # used in CheckPrereq for ip ping check
      self.check_ip = self.hostname.ip
    else:
      self.check_ip = None

    ### Node/iallocator related checks
    CheckIAllocatorOrNode(self, "iallocator", "pnode")

    if self.op.pnode is not None:
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        if self.op.snode is None:
          raise errors.OpPrereqError("The networked disk templates need"
                                     " a mirror node", errors.ECODE_INVAL)
      elif self.op.snode:
        self.LogWarning("Secondary node will be ignored on non-mirrored disk"
                        " template")
        self.op.snode = None

    _CheckOpportunisticLocking(self.op)

    if self.op.mode == constants.INSTANCE_IMPORT:
      # On import force_variant must be True, because if we forced it at
      # initial install, our only chance when importing it back is that it
      # works again!
      self.op.force_variant = True

      if self.op.no_install:
        self.LogInfo("No-installation mode has no effect during import")

    elif self.op.mode == constants.INSTANCE_CREATE:
      if self.op.os_type is None:
        raise errors.OpPrereqError("No guest OS specified",
                                   errors.ECODE_INVAL)
      if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
        raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
                                   " installation" % self.op.os_type,
                                   errors.ECODE_STATE)
    elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
      self._cds = GetClusterDomainSecret()

      # Check handshake to ensure both clusters have the same domain secret
      src_handshake = self.op.source_handshake
      if not src_handshake:
        raise errors.OpPrereqError("Missing source handshake",
                                   errors.ECODE_INVAL)

      errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
                                                           src_handshake)
      if errmsg:
        raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
                                   errors.ECODE_INVAL)

      # Load and check source CA
      self.source_x509_ca_pem = self.op.source_x509_ca
      if not self.source_x509_ca_pem:
        raise errors.OpPrereqError("Missing source X509 CA",
                                   errors.ECODE_INVAL)

      try:
        (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
                                                    self._cds)
      except OpenSSL.crypto.Error, err:
        raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
                                   (err, ), errors.ECODE_INVAL)

      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
      if errcode is not None:
        raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
                                   errors.ECODE_INVAL)

      self.source_x509_ca = cert

      src_instance_name = self.op.source_instance_name
      if not src_instance_name:
        raise errors.OpPrereqError("Missing source instance name",
                                   errors.ECODE_INVAL)

      self.source_instance_name = \
        netutils.GetHostname(name=src_instance_name).name

    else:
      raise errors.OpPrereqError("Invalid instance creation mode %r" %
                                 self.op.mode, errors.ECODE_INVAL)

  def ExpandNames(self):
    """ExpandNames for CreateInstance.

    Figure out the right locks for instance creation.

    """
    self.needed_locks = {}

    # this is just a preventive check, but someone might still add this
    # instance in the meantime, and creation will fail at lock-add time
    if self.op.instance_name in \
      [inst.name for inst in self.cfg.GetAllInstancesInfo().values()]:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 self.op.instance_name, errors.ECODE_EXISTS)

    self.add_locks[locking.LEVEL_INSTANCE] = self.op.instance_name

    if self.op.iallocator:
      # TODO: Find a solution to not lock all nodes in the cluster, e.g. by
      # specifying a group on instance creation and then selecting nodes from
      # that group
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
      self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET

      if self.op.opportunistic_locking:
        self.opportunistic_locks[locking.LEVEL_NODE] = True
        self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
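        # With opportunistic locking the LU gets whichever node locks are
        # currently free instead of blocking; _RunAllocator below then
        # whitelists exactly the nodes for which a lock was obtained.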
    else:
      (self.op.pnode_uuid, self.op.pnode) = \
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
      nodelist = [self.op.pnode_uuid]
      if self.op.snode is not None:
        (self.op.snode_uuid, self.op.snode) = \
          ExpandNodeUuidAndName(self.cfg, self.op.snode_uuid, self.op.snode)
        nodelist.append(self.op.snode_uuid)
      self.needed_locks[locking.LEVEL_NODE] = nodelist

    # in case of import lock the source node too
    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = self.op.src_node
      src_path = self.op.src_path

      if src_path is None:
        self.op.src_path = src_path = self.op.instance_name

      if src_node is None:
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
        self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
        self.op.src_node = None
        if os.path.isabs(src_path):
          raise errors.OpPrereqError("Importing an instance from a path"
                                     " requires a source node option",
                                     errors.ECODE_INVAL)
      else:
        (self.op.src_node_uuid, self.op.src_node) = (_, src_node) = \
          ExpandNodeUuidAndName(self.cfg, self.op.src_node_uuid, src_node)
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
          self.needed_locks[locking.LEVEL_NODE].append(self.op.src_node_uuid)
        if not os.path.isabs(src_path):
          self.op.src_path = \
            utils.PathJoin(pathutils.EXPORT_DIR, src_path)

    self.needed_locks[locking.LEVEL_NODE_RES] = \
      CopyLockList(self.needed_locks[locking.LEVEL_NODE])
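    # The node resource locks mirror the node locks; CheckPrereq later
    # asserts that both lock sets are still identical after the unneeded
    # ones are released.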

  def _RunAllocator(self):
    """Run the allocator based on input opcode.

    """
    if self.op.opportunistic_locking:
      # Only consider nodes for which a lock is held
      node_name_whitelist = self.cfg.GetNodeNames(
        self.owned_locks(locking.LEVEL_NODE))
    else:
      node_name_whitelist = None

    req = _CreateInstanceAllocRequest(self.op, self.disks,
                                      self.nics, self.be_full,
                                      node_name_whitelist)
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      # When opportunistic locks are used, only a temporary failure is
      # generated
      if self.op.opportunistic_locking:
        ecode = errors.ECODE_TEMP_NORES
      else:
        ecode = errors.ECODE_NORES

      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" %
                                 (self.op.iallocator, ial.info),
                                 ecode)

    (self.op.pnode_uuid, self.op.pnode) = \
      ExpandNodeUuidAndName(self.cfg, None, ial.result[0])
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                 self.op.instance_name, self.op.iallocator,
                 utils.CommaJoin(ial.result))

    assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator"

    if req.RequiredNodes() == 2:
      (self.op.snode_uuid, self.op.snode) = \
        ExpandNodeUuidAndName(self.cfg, None, ial.result[1])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "ADD_MODE": self.op.mode,
      }
    if self.op.mode == constants.INSTANCE_IMPORT:
      env["SRC_NODE"] = self.op.src_node
      env["SRC_PATH"] = self.op.src_path
      env["SRC_IMAGES"] = self.src_images

    env.update(BuildInstanceHookEnv(
      name=self.op.instance_name,
      primary_node_name=self.op.pnode,
      secondary_node_names=self.cfg.GetNodeNames(self.secondaries),
      status=self.op.start,
      os_type=self.op.os_type,
      minmem=self.be_full[constants.BE_MINMEM],
      maxmem=self.be_full[constants.BE_MAXMEM],
      vcpus=self.be_full[constants.BE_VCPUS],
      nics=NICListToTuple(self, self.nics),
      disk_template=self.op.disk_template,
      disks=[(d[constants.IDISK_NAME], d.get("uuid", ""),
              d[constants.IDISK_SIZE], d[constants.IDISK_MODE])
             for d in self.disks],
      bep=self.be_full,
      hvp=self.hv_full,
      hypervisor_name=self.op.hypervisor,
      tags=self.op.tags,
      ))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode(), self.op.pnode_uuid] + self.secondaries
    return nl, nl

  def _ReadExportInfo(self):
    """Reads the export information from disk.

    It will override the opcode source node and path with the actual
    information, if these two were not specified before.

    @return: the export information

    """
    assert self.op.mode == constants.INSTANCE_IMPORT

    if self.op.src_node_uuid is None:
      locked_nodes = self.owned_locks(locking.LEVEL_NODE)
      exp_list = self.rpc.call_export_list(locked_nodes)
      found = False
      for node_uuid in exp_list:
        if exp_list[node_uuid].fail_msg:
          continue
        if self.op.src_path in exp_list[node_uuid].payload:
          found = True
          self.op.src_node = self.cfg.GetNodeInfo(node_uuid).name
          self.op.src_node_uuid = node_uuid
          self.op.src_path = utils.PathJoin(pathutils.EXPORT_DIR,
                                            self.op.src_path)
          break
      if not found:
        raise errors.OpPrereqError("No export found for relative path %s" %
                                   self.op.src_path, errors.ECODE_INVAL)

    CheckNodeOnline(self, self.op.src_node_uuid)
    result = self.rpc.call_export_info(self.op.src_node_uuid, self.op.src_path)
    result.Raise("No export or invalid export found in dir %s" %
                 self.op.src_path)

    export_info = objects.SerializableConfigParser.Loads(str(result.payload))
    if not export_info.has_section(constants.INISECT_EXP):
      raise errors.ProgrammerError("Corrupted export config",
                                   errors.ECODE_ENVIRON)

    ei_version = export_info.get(constants.INISECT_EXP, "version")
    if int(ei_version) != constants.EXPORT_VERSION:
      raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                 (ei_version, constants.EXPORT_VERSION),
                                 errors.ECODE_ENVIRON)
    return export_info

  def _ReadExportParams(self, einfo):
    """Use export parameters as defaults.

    In case the opcode doesn't specify (as in override) some instance
    parameters, try to use them from the export information, if it
    declares them.

    """
    self.op.os_type = einfo.get(constants.INISECT_EXP, "os")

    if not self.op.disks:
      disks = []
      # TODO: import the disk iv_name too
      for idx in range(constants.MAX_DISKS):
        if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
          disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
          disks.append({constants.IDISK_SIZE: disk_sz})
      self.op.disks = disks
      if not disks and self.op.disk_template != constants.DT_DISKLESS:
        raise errors.OpPrereqError("No disk info specified and the export"
                                   " is missing the disk information",
                                   errors.ECODE_INVAL)

    if not self.op.nics:
      nics = []
      for idx in range(constants.MAX_NICS):
        if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
          ndict = {}
          for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
            nic_param_name = "nic%d_%s" % (idx, name)
            if einfo.has_option(constants.INISECT_INS, nic_param_name):
              v = einfo.get(constants.INISECT_INS, nic_param_name)
              ndict[name] = v
          nics.append(ndict)
        else:
          break
      self.op.nics = nics

    if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
      self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()

    if (self.op.hypervisor is None and
        einfo.has_option(constants.INISECT_INS, "hypervisor")):
      self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")

    if einfo.has_section(constants.INISECT_HYP):
      # use the export parameters but do not override the ones
      # specified by the user
      for name, value in einfo.items(constants.INISECT_HYP):
        if name not in self.op.hvparams:
          self.op.hvparams[name] = value

    if einfo.has_section(constants.INISECT_BEP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_BEP):
        if name not in self.op.beparams:
          self.op.beparams[name] = value
        # Compatibility for the old "memory" be param
        if name == constants.BE_MEMORY:
          if constants.BE_MAXMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MAXMEM] = value
          if constants.BE_MINMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MINMEM] = value
    else:
      # try to read the parameters old style, from the main section
      for name in constants.BES_PARAMETERS:
        if (name not in self.op.beparams and
            einfo.has_option(constants.INISECT_INS, name)):
          self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)

    if einfo.has_section(constants.INISECT_OSP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_OSP):
        if name not in self.op.osparams:
          self.op.osparams[name] = value

    
809
  def _RevertToDefaults(self, cluster):
810
    """Revert the instance parameters to the default values.
811

812
    """
813
    # hvparams
814
    hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
815
    for name in self.op.hvparams.keys():
816
      if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
817
        del self.op.hvparams[name]
818
    # beparams
819
    be_defs = cluster.SimpleFillBE({})
820
    for name in self.op.beparams.keys():
821
      if name in be_defs and be_defs[name] == self.op.beparams[name]:
822
        del self.op.beparams[name]
823
    # nic params
824
    nic_defs = cluster.SimpleFillNIC({})
825
    for nic in self.op.nics:
826
      for name in constants.NICS_PARAMETERS:
827
        if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
828
          del nic[name]
829
    # osparams
830
    os_defs = cluster.SimpleFillOS(self.op.os_type, {})
831
    for name in self.op.osparams.keys():
832
      if name in os_defs and os_defs[name] == self.op.osparams[name]:
833
        del self.op.osparams[name]

  def _CalculateFileStorageDir(self):
    """Calculate final instance file storage dir.

    """
    # file storage dir calculation/check
    self.instance_file_storage_dir = None
    if self.op.disk_template in constants.DTS_FILEBASED:
      # build the full file storage dir path
      joinargs = []

      if self.op.disk_template == constants.DT_SHARED_FILE:
        get_fsd_fn = self.cfg.GetSharedFileStorageDir
      else:
        get_fsd_fn = self.cfg.GetFileStorageDir

      cfg_storagedir = get_fsd_fn()
      if not cfg_storagedir:
        raise errors.OpPrereqError("Cluster file storage dir not defined",
                                   errors.ECODE_STATE)
      joinargs.append(cfg_storagedir)

      if self.op.file_storage_dir is not None:
        joinargs.append(self.op.file_storage_dir)

      joinargs.append(self.op.instance_name)

      # pylint: disable=W0142
      self.instance_file_storage_dir = utils.PathJoin(*joinargs)
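      # E.g. (illustrative values) a cluster dir "/srv/ganeti/file-storage",
      # an opcode dir "mydir" and instance "inst1.example.com" yield
      # "/srv/ganeti/file-storage/mydir/inst1.example.com".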

  def CheckPrereq(self): # pylint: disable=R0914
    """Check prerequisites.

    """
    self._CalculateFileStorageDir()

    if self.op.mode == constants.INSTANCE_IMPORT:
      export_info = self._ReadExportInfo()
      self._ReadExportParams(export_info)
      self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
    else:
      self._old_instance_name = None

    if (not self.cfg.GetVGName() and
        self.op.disk_template not in constants.DTS_NOT_LVM):
      raise errors.OpPrereqError("Cluster does not support lvm-based"
                                 " instances", errors.ECODE_STATE)

    if (self.op.hypervisor is None or
        self.op.hypervisor == constants.VALUE_AUTO):
      self.op.hypervisor = self.cfg.GetHypervisorType()

    cluster = self.cfg.GetClusterInfo()
    enabled_hvs = cluster.enabled_hypervisors
    if self.op.hypervisor not in enabled_hvs:
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
                                 " cluster (%s)" %
                                 (self.op.hypervisor, ",".join(enabled_hvs)),
                                 errors.ECODE_STATE)

    # Check tag validity
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)

    # check hypervisor parameter syntax (locally)
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
    filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
                                      self.op.hvparams)
    hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
    hv_type.CheckParameterSyntax(filled_hvp)
    self.hv_full = filled_hvp
    # check that we don't specify global parameters on an instance
    CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
                         "instance", "cluster")

    # fill and remember the beparams dict
    self.be_full = _ComputeFullBeParams(self.op, cluster)

    # build os parameters
    self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)

    # now that hvp/bep are in final format, let's reset to defaults,
    # if told to do so
    if self.op.identify_defaults:
      self._RevertToDefaults(cluster)

    # NIC buildup
    self.nics = _ComputeNics(self.op, cluster, self.check_ip, self.cfg,
                             self.proc.GetECId())

    # disk checks/pre-build
    default_vg = self.cfg.GetVGName()
    self.disks = ComputeDisks(self.op, default_vg)

    if self.op.mode == constants.INSTANCE_IMPORT:
      disk_images = []
      for idx in range(len(self.disks)):
        option = "disk%d_dump" % idx
        if export_info.has_option(constants.INISECT_INS, option):
          # FIXME: are the old os-es, disk sizes, etc. useful?
          export_name = export_info.get(constants.INISECT_INS, option)
          image = utils.PathJoin(self.op.src_path, export_name)
          disk_images.append(image)
        else:
          disk_images.append(False)

      self.src_images = disk_images

      if self.op.instance_name == self._old_instance_name:
        for idx, nic in enumerate(self.nics):
          if nic.mac == constants.VALUE_AUTO:
            nic_mac_ini = "nic%d_mac" % idx
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)

    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT

    # ip ping checks (we use the same ip that was resolved in ExpandNames)
    if self.op.ip_check:
      if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (self.check_ip, self.op.instance_name),
                                   errors.ECODE_NOTUNIQUE)

    #### mac address generation
    # By generating here the mac address both the allocator and the hooks get
    # the real final mac address rather than the 'auto' or 'generate' value.
    # There is a race condition between the generation and the instance object
    # creation, which means that we know the mac is valid now, but we're not
    # sure it will be when we actually add the instance. If things go bad
    # adding the instance will abort because of a duplicate mac, and the
    # creation job will fail.
    for nic in self.nics:
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId())

    #### allocator run

    if self.op.iallocator is not None:
      self._RunAllocator()

    # Release all unneeded node locks
    keep_locks = filter(None, [self.op.pnode_uuid, self.op.snode_uuid,
                               self.op.src_node_uuid])
    ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES)), \
      "Node locks differ from node resource locks"

    #### node related checks

    # check primary node
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode_uuid)
    assert self.pnode is not None, \
      "Cannot retrieve locked node %s" % self.op.pnode_uuid
    if pnode.offline:
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if pnode.drained:
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if not pnode.vm_capable:
      raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
                                 " '%s'" % pnode.name, errors.ECODE_STATE)

    self.secondaries = []

    # Fill in any IPs from IP pools. This must happen here, because we need to
    # know the nic's primary node, as specified by the iallocator
    for idx, nic in enumerate(self.nics):
      net_uuid = nic.network
      if net_uuid is not None:
        nobj = self.cfg.GetNetwork(net_uuid)
        netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.uuid)
        if netparams is None:
          raise errors.OpPrereqError("No netparams found for network"
                                     " %s. Probably not connected to"
1013
                                     " node's %s nodegroup" %
1014
                                     (nobj.name, self.pnode.name),
1015
                                     errors.ECODE_INVAL)
1016
        self.LogInfo("NIC/%d inherits netparams %s" %
1017
                     (idx, netparams.values()))
1018
        nic.nicparams = dict(netparams)
1019
        if nic.ip is not None:
1020
          if nic.ip.lower() == constants.NIC_IP_POOL:
1021
            try:
1022
              nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId())
1023
            except errors.ReservationError:
1024
              raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
1025
                                         " from the address pool" % idx,
1026
                                         errors.ECODE_STATE)
1027
            self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name)
1028
          else:
1029
            try:
1030
              self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId())
1031
            except errors.ReservationError:
1032
              raise errors.OpPrereqError("IP address %s already in use"
1033
                                         " or does not belong to network %s" %
1034
                                         (nic.ip, nobj.name),
1035
                                         errors.ECODE_NOTUNIQUE)
1036

    
1037
      # net is None, ip None or given
1038
      elif self.op.conflicts_check:
1039
        _CheckForConflictingIp(self, nic.ip, self.pnode.uuid)
1040

    
1041
    # mirror node verification
1042
    if self.op.disk_template in constants.DTS_INT_MIRROR:
1043
      if self.op.snode_uuid == pnode.uuid:
1044
        raise errors.OpPrereqError("The secondary node cannot be the"
1045
                                   " primary node", errors.ECODE_INVAL)
1046
      CheckNodeOnline(self, self.op.snode_uuid)
1047
      CheckNodeNotDrained(self, self.op.snode_uuid)
1048
      CheckNodeVmCapable(self, self.op.snode_uuid)
1049
      self.secondaries.append(self.op.snode_uuid)
1050

    
1051
      snode = self.cfg.GetNodeInfo(self.op.snode_uuid)
1052
      if pnode.group != snode.group:
1053
        self.LogWarning("The primary and secondary nodes are in two"
1054
                        " different node groups; the disk parameters"
1055
                        " from the first disk's node group will be"
1056
                        " used")
1057

    
1058
    nodes = [pnode]
1059
    if self.op.disk_template in constants.DTS_INT_MIRROR:
1060
      nodes.append(snode)
1061
    has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
1062
    excl_stor = compat.any(map(has_es, nodes))
1063
    if excl_stor and not self.op.disk_template in constants.DTS_EXCL_STORAGE:
1064
      raise errors.OpPrereqError("Disk template %s not supported with"
1065
                                 " exclusive storage" % self.op.disk_template,
1066
                                 errors.ECODE_STATE)
1067
    for disk in self.disks:
1068
      CheckSpindlesExclusiveStorage(disk, excl_stor, True)
1069

    
1070
    node_uuids = [pnode.uuid] + self.secondaries
1071

    
1072
    if not self.adopt_disks:
1073
      if self.op.disk_template == constants.DT_RBD:
1074
        # _CheckRADOSFreeSpace() is just a placeholder.
1075
        # Any function that checks prerequisites can be placed here.
1076
        # Check if there is enough space on the RADOS cluster.
1077
        CheckRADOSFreeSpace()
1078
      elif self.op.disk_template == constants.DT_EXT:
1079
        # FIXME: Function that checks prereqs if needed
1080
        pass
1081
      elif self.op.disk_template in utils.GetLvmDiskTemplates():
1082
        # Check lv size requirements, if not adopting
1083
        req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
1084
        CheckNodesFreeDiskPerVG(self, node_uuids, req_sizes)
1085
      else:
1086
        # FIXME: add checks for other, non-adopting, non-lvm disk templates
1087
        pass
1088

    
1089
    elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
1090
      all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
1091
                                disk[constants.IDISK_ADOPT])
1092
                     for disk in self.disks])
1093
      if len(all_lvs) != len(self.disks):
1094
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
1095
                                   errors.ECODE_INVAL)
1096
      for lv_name in all_lvs:
1097
        try:
1098
          # FIXME: lv_name here is "vg/lv" need to ensure that other calls
1099
          # to ReserveLV uses the same syntax
1100
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
1101
        except errors.ReservationError:
1102
          raise errors.OpPrereqError("LV named %s used by another instance" %
1103
                                     lv_name, errors.ECODE_NOTUNIQUE)
1104

    
1105
      vg_names = self.rpc.call_vg_list([pnode.uuid])[pnode.uuid]
1106
      vg_names.Raise("Cannot get VG information from node %s" % pnode.name)
1107

    
1108
      node_lvs = self.rpc.call_lv_list([pnode.uuid],
1109
                                       vg_names.payload.keys())[pnode.uuid]
1110
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
1111
      node_lvs = node_lvs.payload
1112

    
1113
      delta = all_lvs.difference(node_lvs.keys())
1114
      if delta:
1115
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
1116
                                   utils.CommaJoin(delta),
1117
                                   errors.ECODE_INVAL)
1118
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
1119
      if online_lvs:
1120
        raise errors.OpPrereqError("Online logical volumes found, cannot"
1121
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
1122
                                   errors.ECODE_STATE)
1123
      # update the size of disk based on what is found
1124
      for dsk in self.disks:
1125
        dsk[constants.IDISK_SIZE] = \
1126
          int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
1127
                                        dsk[constants.IDISK_ADOPT])][0]))
1128

    
1129
    elif self.op.disk_template == constants.DT_BLOCK:
1130
      # Normalize and de-duplicate device paths
1131
      all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
1132
                       for disk in self.disks])
1133
      if len(all_disks) != len(self.disks):
1134
        raise errors.OpPrereqError("Duplicate disk names given for adoption",
1135
                                   errors.ECODE_INVAL)
1136
      baddisks = [d for d in all_disks
1137
                  if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
1138
      if baddisks:
1139
        raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
1140
                                   " cannot be adopted" %
1141
                                   (utils.CommaJoin(baddisks),
1142
                                    constants.ADOPTABLE_BLOCKDEV_ROOT),
1143
                                   errors.ECODE_INVAL)
1144

    
1145
      node_disks = self.rpc.call_bdev_sizes([pnode.uuid],
1146
                                            list(all_disks))[pnode.uuid]
1147
      node_disks.Raise("Cannot get block device information from node %s" %
1148
                       pnode.name)
1149
      node_disks = node_disks.payload
1150
      delta = all_disks.difference(node_disks.keys())
1151
      if delta:
1152
        raise errors.OpPrereqError("Missing block device(s): %s" %
1153
                                   utils.CommaJoin(delta),
1154
                                   errors.ECODE_INVAL)
1155
      for dsk in self.disks:
1156
        dsk[constants.IDISK_SIZE] = \
1157
          int(float(node_disks[dsk[constants.IDISK_ADOPT]]))
1158

    
1159
    # Verify instance specs
1160
    spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
1161
    ispec = {
1162
      constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
1163
      constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
1164
      constants.ISPEC_DISK_COUNT: len(self.disks),
1165
      constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE]
1166
                                  for disk in self.disks],
1167
      constants.ISPEC_NIC_COUNT: len(self.nics),
1168
      constants.ISPEC_SPINDLE_USE: spindle_use,
1169
      }
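
    # The ispec above is matched against the node group's instance policy
    # below; any value outside the policy's bounds ends up in "res" and
    # aborts the creation unless ignore_ipolicy is set.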
    group_info = self.cfg.GetNodeGroup(pnode.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
    res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
                                               self.op.disk_template)
    if not self.op.ignore_ipolicy and res:
      msg = ("Instance allocation to group %s (%s) violates policy: %s" %
             (pnode.group, group_info.name, utils.CommaJoin(res)))
      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

    CheckHVParams(self, node_uuids, self.op.hypervisor, self.op.hvparams)

    CheckNodeHasOS(self, pnode.uuid, self.op.os_type, self.op.force_variant)
    # check OS parameters (remotely)
    CheckOSParams(self, True, node_uuids, self.op.os_type, self.os_full)

    CheckNicsBridgesExist(self, self.nics, self.pnode.uuid)

    #TODO: _CheckExtParams (remotely)
    # Check parameters for extstorage

    # memory check on primary node
    #TODO(dynmem): use MINMEM for checking
    if self.op.start:
      hvfull = objects.FillDict(cluster.hvparams.get(self.op.hypervisor, {}),
                                self.op.hvparams)
      CheckNodeFreeMemory(self, self.pnode.uuid,
                          "creating instance %s" % self.op.instance_name,
                          self.be_full[constants.BE_MAXMEM],
                          self.op.hypervisor, hvfull)

    self.dry_run_result = list(node_uuids)
    
1203
  def Exec(self, feedback_fn):
1204
    """Create and add the instance to the cluster.
1205

1206
    """
1207
    assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
1208
                self.owned_locks(locking.LEVEL_NODE)), \
1209
      "Node locks differ from node resource locks"
1210
    assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)
1211

    
1212
    ht_kind = self.op.hypervisor
1213
    if ht_kind in constants.HTS_REQ_PORT:
1214
      network_port = self.cfg.AllocatePort()
1215
    else:
1216
      network_port = None
1217

    
1218
    instance_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
1219

    
1220
    # This is ugly but we got a chicken-egg problem here
1221
    # We can only take the group disk parameters, as the instance
1222
    # has no disks yet (we are generating them right here).
1223
    nodegroup = self.cfg.GetNodeGroup(self.pnode.group)
1224
    disks = GenerateDiskTemplate(self,
1225
                                 self.op.disk_template,
1226
                                 instance_uuid, self.pnode.uuid,
1227
                                 self.secondaries,
1228
                                 self.disks,
1229
                                 self.instance_file_storage_dir,
1230
                                 self.op.file_driver,
1231
                                 0,
1232
                                 feedback_fn,
1233
                                 self.cfg.GetGroupDiskParams(nodegroup))
1234

    
1235
    iobj = objects.Instance(name=self.op.instance_name,
1236
                            uuid=instance_uuid,
1237
                            os=self.op.os_type,
1238
                            primary_node=self.pnode.uuid,
1239
                            nics=self.nics, disks=disks,
1240
                            disk_template=self.op.disk_template,
1241
                            disks_active=False,
1242
                            admin_state=constants.ADMINST_DOWN,
1243
                            network_port=network_port,
1244
                            beparams=self.op.beparams,
1245
                            hvparams=self.op.hvparams,
1246
                            hypervisor=self.op.hypervisor,
1247
                            osparams=self.op.osparams,
1248
                            )
1249

    
1250
    if self.op.tags:
1251
      for tag in self.op.tags:
1252
        iobj.AddTag(tag)
1253

    
1254
    if self.adopt_disks:
1255
      if self.op.disk_template == constants.DT_PLAIN:
1256
        # rename LVs to the newly-generated names; we need to construct
1257
        # 'fake' LV disks with the old data, plus the new unique_id
1258
        tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
1259
        rename_to = []
1260
        for t_dsk, a_dsk in zip(tmp_disks, self.disks):
1261
          rename_to.append(t_dsk.logical_id)
1262
          t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
1263
          self.cfg.SetDiskID(t_dsk, self.pnode.uuid)
1264
        result = self.rpc.call_blockdev_rename(self.pnode.uuid,
1265
                                               zip(tmp_disks, rename_to))
1266
        result.Raise("Failed to rename adoped LVs")
1267
    else:
1268
      feedback_fn("* creating instance disks...")
1269
      try:
1270
        CreateDisks(self, iobj)
1271
      except errors.OpExecError:
1272
        self.LogWarning("Device creation failed")
1273
        self.cfg.ReleaseDRBDMinors(self.op.instance_name)
1274
        raise
1275

    
1276
    feedback_fn("adding instance %s to cluster config" % self.op.instance_name)
1277

    
1278
    self.cfg.AddInstance(iobj, self.proc.GetECId())
1279

    
1280
    # Declare that we don't want to remove the instance lock anymore, as we've
1281
    # added the instance to the config
1282
    del self.remove_locks[locking.LEVEL_INSTANCE]
1283

    
1284
    if self.op.mode == constants.INSTANCE_IMPORT:
1285
      # Release unused nodes
1286
      ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node_uuid])
1287
    else:
1288
      # Release all nodes
1289
      ReleaseLocks(self, locking.LEVEL_NODE)
1290

    
1291
    disk_abort = False
1292
    if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
1293
      feedback_fn("* wiping instance disks...")
1294
      try:
1295
        WipeDisks(self, iobj)
1296
      except errors.OpExecError, err:
1297
        logging.exception("Wiping disks failed")
1298
        self.LogWarning("Wiping instance disks failed (%s)", err)
1299
        disk_abort = True
1300

    
1301
    if disk_abort:
1302
      # Something is already wrong with the disks, don't do anything else
1303
      pass
1304
    elif self.op.wait_for_sync:
1305
      disk_abort = not WaitForSync(self, iobj)
1306
    elif iobj.disk_template in constants.DTS_INT_MIRROR:
1307
      # make sure the disks are not degraded (still sync-ing is ok)
1308
      feedback_fn("* checking mirrors status")
1309
      disk_abort = not WaitForSync(self, iobj, oneshot=True)
1310
    else:
1311
      disk_abort = False
1312

    
1313
    if disk_abort:
1314
      RemoveDisks(self, iobj)
1315
      self.cfg.RemoveInstance(iobj.uuid)
1316
      # Make sure the instance lock gets removed
1317
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
1318
      raise errors.OpExecError("There are some degraded disks for"
1319
                               " this instance")
1320

    
1321
    # instance disks are now active
1322
    iobj.disks_active = True
1323

    
1324
    # Release all node resource locks
1325
    ReleaseLocks(self, locking.LEVEL_NODE_RES)
1326

    
1327
    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
1328
      # we need to set the disks ID to the primary node, since the
1329
      # preceding code might or might have not done it, depending on
1330
      # disk template and other options
1331
      for disk in iobj.disks:
1332
        self.cfg.SetDiskID(disk, self.pnode.uuid)
1333
      if self.op.mode == constants.INSTANCE_CREATE:
1334
        if not self.op.no_install:
1335
          pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
1336
                        not self.op.wait_for_sync)
1337
          if pause_sync:
1338
            feedback_fn("* pausing disk sync to install instance OS")
1339
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
1340
                                                              (iobj.disks,
1341
                                                               iobj), True)
1342
            for idx, success in enumerate(result.payload):
1343
              if not success:
1344
                logging.warn("pause-sync of instance %s for disk %d failed",
1345
                             self.op.instance_name, idx)
1346

    
1347
          feedback_fn("* running the instance OS create scripts...")
1348
          # FIXME: pass debug option from opcode to backend
1349
          os_add_result = \
1350
            self.rpc.call_instance_os_add(self.pnode.uuid, (iobj, None), False,
1351
                                          self.op.debug_level)
1352
          if pause_sync:
1353
            feedback_fn("* resuming disk sync")
1354
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
1355
                                                              (iobj.disks,
1356
                                                               iobj), False)
1357
            for idx, success in enumerate(result.payload):
1358
              if not success:
1359
                logging.warn("resume-sync of instance %s for disk %d failed",
1360
                             self.op.instance_name, idx)
1361

    
1362
          os_add_result.Raise("Could not add os for instance %s"
1363
                              " on node %s" % (self.op.instance_name,
1364
                                               self.pnode.name))
1365

    
1366
      else:
1367
        if self.op.mode == constants.INSTANCE_IMPORT:
1368
          feedback_fn("* running the instance OS import scripts...")
1369

    
1370
          transfers = []
1371

    
1372
          for idx, image in enumerate(self.src_images):
1373
            if not image:
1374
              continue
1375

    
1376
            # FIXME: pass debug option from opcode to backend
1377
            dt = masterd.instance.DiskTransfer("disk/%s" % idx,
1378
                                               constants.IEIO_FILE, (image, ),
1379
                                               constants.IEIO_SCRIPT,
1380
                                               (iobj.disks[idx], idx),
1381
                                               None)
1382
            transfers.append(dt)
1383

    
1384
          import_result = \
1385
            masterd.instance.TransferInstanceData(self, feedback_fn,
1386
                                                  self.op.src_node_uuid,
1387
                                                  self.pnode.uuid,
1388
                                                  self.pnode.secondary_ip,
1389
                                                  iobj, transfers)
1390
          if not compat.all(import_result):
1391
            self.LogWarning("Some disks for instance %s on node %s were not"
1392
                            " imported successfully" % (self.op.instance_name,
1393
                                                        self.pnode.name))
1394

    
1395
          rename_from = self._old_instance_name
1396

    
1397
        elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
1398
          feedback_fn("* preparing remote import...")
1399
          # The source cluster will stop the instance before attempting to make
1400
          # a connection. In some cases stopping an instance can take a long
1401
          # time, hence the shutdown timeout is added to the connection
1402
          # timeout.
1403
          connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
1404
                             self.op.source_shutdown_timeout)
1405
          timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
1406

    
1407
          assert iobj.primary_node == self.pnode.uuid
1408
          disk_results = \
1409
            masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
1410
                                          self.source_x509_ca,
1411
                                          self._cds, timeouts)
1412
          if not compat.all(disk_results):
1413
            # TODO: Should the instance still be started, even if some disks
1414
            # failed to import (valid for local imports, too)?
1415
            self.LogWarning("Some disks for instance %s on node %s were not"
1416
                            " imported successfully" % (self.op.instance_name,
1417
                                                        self.pnode.name))
1418

    
1419
          rename_from = self.source_instance_name
1420

    
1421
        else:
1422
          # also checked in the prereq part
1423
          raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
1424
                                       % self.op.mode)
1425

    
1426
        # Run rename script on newly imported instance
1427
        assert iobj.name == self.op.instance_name
1428
        feedback_fn("Running rename script for %s" % self.op.instance_name)
1429
        result = self.rpc.call_instance_run_rename(self.pnode.uuid, iobj,
1430
                                                   rename_from,
1431
                                                   self.op.debug_level)
1432
        result.Warn("Failed to run rename script for %s on node %s" %
1433
                    (self.op.instance_name, self.pnode.name), self.LogWarning)
1434

    
1435
    assert not self.owned_locks(locking.LEVEL_NODE_RES)
1436

    
1437
    if self.op.start:
1438
      iobj.admin_state = constants.ADMINST_UP
1439
      self.cfg.Update(iobj, feedback_fn)
1440
      logging.info("Starting instance %s on node %s", self.op.instance_name,
1441
                   self.pnode.name)
1442
      feedback_fn("* starting instance...")
1443
      result = self.rpc.call_instance_start(self.pnode.uuid, (iobj, None, None),
1444
                                            False, self.op.reason)
1445
      result.Raise("Could not start instance")
1446

    
1447
    return list(iobj.all_nodes)
1448

    
1449

    
1450
class LUInstanceRename(LogicalUnit):
  """Rename an instance.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE

  def CheckArguments(self):
    """Check arguments.

    """
    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("IP address check requires a name check",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = BuildInstanceHookEnvByObject(self, self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    (self.op.instance_uuid, self.op.instance_name) = \
      ExpandInstanceUuidAndName(self.cfg, self.op.instance_uuid,
                                self.op.instance_name)
    instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    assert instance is not None

    # It should actually not happen that an instance is running with a disabled
    # disk template, but in case it does, the renaming of file-based instances
    # will fail horribly. Thus, we test it before.
    if (instance.disk_template in constants.DTS_FILEBASED and
        self.op.new_name != instance.name):
      CheckDiskTemplateEnabled(self.cfg.GetClusterInfo(),
                               instance.disk_template)

    CheckNodeOnline(self, instance.primary_node)
    CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
                       msg="cannot rename")
    self.instance = instance

    new_name = self.op.new_name
    if self.op.name_check:
      hostname = _CheckHostnameSane(self, new_name)
      new_name = self.op.new_name = hostname.name
      if (self.op.ip_check and
          netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (hostname.ip, new_name),
                                   errors.ECODE_NOTUNIQUE)

    instance_names = [inst.name for
                      inst in self.cfg.GetAllInstancesInfo().values()]
    if new_name in instance_names and new_name != instance.name:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 new_name, errors.ECODE_EXISTS)

  def Exec(self, feedback_fn):
    """Rename the instance.

    """
    old_name = self.instance.name

    rename_file_storage = False
    if (self.instance.disk_template in constants.DTS_FILEBASED and
        self.op.new_name != self.instance.name):
      old_file_storage_dir = os.path.dirname(
                               self.instance.disks[0].logical_id[1])
      rename_file_storage = True

    self.cfg.RenameInstance(self.instance.uuid, self.op.new_name)
    # Change the instance lock. This is definitely safe while we hold the BGL.
    # Otherwise the new lock would have to be added in acquired mode.
    assert self.REQ_BGL
    assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER)
    self.glm.remove(locking.LEVEL_INSTANCE, old_name)
    self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)

    # re-read the instance from the configuration after rename
    renamed_inst = self.cfg.GetInstanceInfo(self.instance.uuid)

    if rename_file_storage:
      new_file_storage_dir = os.path.dirname(
                               renamed_inst.disks[0].logical_id[1])
      result = self.rpc.call_file_storage_dir_rename(renamed_inst.primary_node,
                                                     old_file_storage_dir,
                                                     new_file_storage_dir)
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
                   " (but the instance has been renamed in Ganeti)" %
                   (self.cfg.GetNodeName(renamed_inst.primary_node),
                    old_file_storage_dir, new_file_storage_dir))

    StartInstanceDisks(self, renamed_inst, None)
    # update info on disks
    info = GetInstanceInfoText(renamed_inst)
    for (idx, disk) in enumerate(renamed_inst.disks):
      for node_uuid in renamed_inst.all_nodes:
        self.cfg.SetDiskID(disk, node_uuid)
        result = self.rpc.call_blockdev_setinfo(node_uuid, disk, info)
        result.Warn("Error setting info on node %s for disk %s" %
                    (self.cfg.GetNodeName(node_uuid), idx), self.LogWarning)
    try:
      result = self.rpc.call_instance_run_rename(renamed_inst.primary_node,
                                                 renamed_inst, old_name,
                                                 self.op.debug_level)
      result.Warn("Could not run OS rename script for instance %s on node %s"
                  " (but the instance has been renamed in Ganeti)" %
                  (renamed_inst.name,
                   self.cfg.GetNodeName(renamed_inst.primary_node)),
                  self.LogWarning)
    finally:
      ShutdownInstanceDisks(self, renamed_inst)

    return renamed_inst.name


class LUInstanceRemove(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()
    elif level == locking.LEVEL_NODE_RES:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = BuildInstanceHookEnvByObject(self, self.instance)
    env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()]
    nl_post = list(self.instance.all_nodes) + nl
    return (nl, nl_post)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Remove the instance.

    """
    logging.info("Shutting down instance %s on node %s", self.instance.name,
                 self.cfg.GetNodeName(self.instance.primary_node))

    result = self.rpc.call_instance_shutdown(self.instance.primary_node,
                                             self.instance,
                                             self.op.shutdown_timeout,
                                             self.op.reason)
    if self.op.ignore_failures:
      result.Warn("Warning: can't shutdown instance", feedback_fn)
    else:
      result.Raise("Could not shutdown instance %s on node %s" %
                   (self.instance.name,
                    self.cfg.GetNodeName(self.instance.primary_node)))

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES))
    assert not (set(self.instance.all_nodes) -
                self.owned_locks(locking.LEVEL_NODE)), \
      "Not owning correct locks"

    RemoveInstance(self, feedback_fn, self.instance, self.op.ignore_failures)


class LUInstanceMove(LogicalUnit):
  """Move an instance by data-copying.

  """
  HPATH = "instance-move"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    (self.op.target_node_uuid, self.op.target_node) = \
      ExpandNodeUuidAndName(self.cfg, self.op.target_node_uuid,
                            self.op.target_node)
    self.needed_locks[locking.LEVEL_NODE] = [self.op.target_node_uuid]
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes(primary_only=True)
    elif level == locking.LEVEL_NODE_RES:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "TARGET_NODE": self.op.target_node,
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
      }
    env.update(BuildInstanceHookEnvByObject(self, self.instance))
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [
      self.cfg.GetMasterNode(),
      self.instance.primary_node,
      self.op.target_node_uuid,
      ]
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    if self.instance.disk_template not in constants.DTS_COPYABLE:
      raise errors.OpPrereqError("Disk template %s not suitable for copying" %
                                 self.instance.disk_template,
                                 errors.ECODE_STATE)

    target_node = self.cfg.GetNodeInfo(self.op.target_node_uuid)
    assert target_node is not None, \
      "Cannot retrieve locked node %s" % self.op.target_node

    self.target_node_uuid = target_node.uuid
    if target_node.uuid == self.instance.primary_node:
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
                                 (self.instance.name, target_node.name),
                                 errors.ECODE_STATE)

    bep = self.cfg.GetClusterInfo().FillBE(self.instance)

    for idx, dsk in enumerate(self.instance.disks):
      if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
                                   " cannot copy" % idx, errors.ECODE_STATE)

    CheckNodeOnline(self, target_node.uuid)
    CheckNodeNotDrained(self, target_node.uuid)
    CheckNodeVmCapable(self, target_node.uuid)
    cluster = self.cfg.GetClusterInfo()
    group_info = self.cfg.GetNodeGroup(target_node.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
    CheckTargetNodeIPolicy(self, ipolicy, self.instance, target_node, self.cfg,
                           ignore=self.op.ignore_ipolicy)

    if self.instance.admin_state == constants.ADMINST_UP:
      # check memory requirements on the secondary node
      CheckNodeFreeMemory(
          self, target_node.uuid, "failing over instance %s" %
          self.instance.name, bep[constants.BE_MAXMEM],
          self.instance.hypervisor,
          self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])
    else:
      self.LogInfo("Not checking memory on the secondary node as"
                   " instance will not be started")

    # check bridge existence
    CheckInstanceBridgesExist(self, self.instance, node_uuid=target_node.uuid)

  def Exec(self, feedback_fn):
    """Move an instance.

    The move is done by shutting it down on its present node, copying
    the data over (slow) and starting it on the new node.

    """
    source_node = self.cfg.GetNodeInfo(self.instance.primary_node)
    target_node = self.cfg.GetNodeInfo(self.target_node_uuid)

    self.LogInfo("Shutting down instance %s on source node %s",
                 self.instance.name, source_node.name)

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES))

    result = self.rpc.call_instance_shutdown(source_node.uuid, self.instance,
                                             self.op.shutdown_timeout,
                                             self.op.reason)
    if self.op.ignore_consistency:
      result.Warn("Could not shutdown instance %s on node %s. Proceeding"
                  " anyway. Please make sure node %s is down. Error details" %
                  (self.instance.name, source_node.name, source_node.name),
                  self.LogWarning)
    else:
      result.Raise("Could not shutdown instance %s on node %s" %
                   (self.instance.name, source_node.name))

    # create the target disks
    try:
      CreateDisks(self, self.instance, target_node_uuid=target_node.uuid)
    except errors.OpExecError:
      self.LogWarning("Device creation failed")
      self.cfg.ReleaseDRBDMinors(self.instance.uuid)
      raise

    cluster_name = self.cfg.GetClusterInfo().cluster_name

    errs = []
    # activate, get path, copy the data over
    for idx, disk in enumerate(self.instance.disks):
      self.LogInfo("Copying data for disk %d", idx)
      result = self.rpc.call_blockdev_assemble(
                 target_node.uuid, (disk, self.instance), self.instance.name,
                 True, idx)
      if result.fail_msg:
        self.LogWarning("Can't assemble newly created disk %d: %s",
                        idx, result.fail_msg)
        errs.append(result.fail_msg)
        break
      dev_path = result.payload
      result = self.rpc.call_blockdev_export(source_node.uuid, (disk,
                                                                self.instance),
                                             target_node.name, dev_path,
                                             cluster_name)
      if result.fail_msg:
        self.LogWarning("Can't copy data over for disk %d: %s",
                        idx, result.fail_msg)
        errs.append(result.fail_msg)
        break

    if errs:
      self.LogWarning("Some disks failed to copy, aborting")
      try:
        RemoveDisks(self, self.instance, target_node_uuid=target_node.uuid)
      finally:
        self.cfg.ReleaseDRBDMinors(self.instance.uuid)
        raise errors.OpExecError("Errors during disk copy: %s" %
                                 (",".join(errs),))

    self.instance.primary_node = target_node.uuid
    self.cfg.Update(self.instance, feedback_fn)

    self.LogInfo("Removing the disks on the original node")
    RemoveDisks(self, self.instance, target_node_uuid=source_node.uuid)

    # Only start the instance if it's marked as up
    if self.instance.admin_state == constants.ADMINST_UP:
      self.LogInfo("Starting instance %s on node %s",
                   self.instance.name, target_node.name)

      disks_ok, _ = AssembleInstanceDisks(self, self.instance,
                                          ignore_secondaries=True)
      if not disks_ok:
        ShutdownInstanceDisks(self, self.instance)
        raise errors.OpExecError("Can't activate the instance's disks")

      result = self.rpc.call_instance_start(target_node.uuid,
                                            (self.instance, None, None), False,
                                            self.op.reason)
      msg = result.fail_msg
      if msg:
        ShutdownInstanceDisks(self, self.instance)
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
                                 (self.instance.name, target_node.name, msg))


class LUInstanceMultiAlloc(NoHooksLU):
  """Allocates multiple instances at the same time.

  """
  REQ_BGL = False

  def CheckArguments(self):
    """Check arguments.

    """
    nodes = []
    for inst in self.op.instances:
      if inst.iallocator is not None:
        raise errors.OpPrereqError("iallocator is not allowed to be set on"
                                   " instance objects", errors.ECODE_INVAL)
      nodes.append(bool(inst.pnode))
      if inst.disk_template in constants.DTS_INT_MIRROR:
        nodes.append(bool(inst.snode))

    has_nodes = compat.any(nodes)
    if compat.all(nodes) ^ has_nodes:
      raise errors.OpPrereqError("There are instance objects providing"
                                 " pnode/snode while others do not",
                                 errors.ECODE_INVAL)

    if not has_nodes and self.op.iallocator is None:
      default_iallocator = self.cfg.GetDefaultIAllocator()
      if default_iallocator:
        self.op.iallocator = default_iallocator
      else:
        raise errors.OpPrereqError("No iallocator or nodes on the instances"
                                   " given and no cluster-wide default"
                                   " iallocator found; please specify either"
                                   " an iallocator or nodes on the instances"
                                   " or set a cluster-wide default iallocator",
                                   errors.ECODE_INVAL)

    _CheckOpportunisticLocking(self.op)

    dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
    if dups:
      raise errors.OpPrereqError("There are duplicate instance names: %s" %
                                 utils.CommaJoin(dups), errors.ECODE_INVAL)

  def ExpandNames(self):
    """Calculate the locks.

    """
    self.share_locks = ShareAll()
    self.needed_locks = {
      # iallocator will select nodes and even if no iallocator is used,
      # collisions with LUInstanceCreate should be avoided
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
      }

    if self.op.iallocator:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
      self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET

      if self.op.opportunistic_locking:
        self.opportunistic_locks[locking.LEVEL_NODE] = True
        self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
    else:
      nodeslist = []
      for inst in self.op.instances:
        (inst.pnode_uuid, inst.pnode) = \
          ExpandNodeUuidAndName(self.cfg, inst.pnode_uuid, inst.pnode)
        nodeslist.append(inst.pnode_uuid)
        if inst.snode is not None:
          (inst.snode_uuid, inst.snode) = \
            ExpandNodeUuidAndName(self.cfg, inst.snode_uuid, inst.snode)
          nodeslist.append(inst.snode_uuid)

      self.needed_locks[locking.LEVEL_NODE] = nodeslist
      # Lock resources of instance's primary and secondary nodes (copy to
      # prevent accidental modification)
      self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)

  def CheckPrereq(self):
    """Check prerequisites.

    """
    if self.op.iallocator:
      cluster = self.cfg.GetClusterInfo()
      default_vg = self.cfg.GetVGName()
      ec_id = self.proc.GetECId()

      if self.op.opportunistic_locking:
        # Only consider nodes for which a lock is held
        node_whitelist = self.cfg.GetNodeNames(
                           list(self.owned_locks(locking.LEVEL_NODE)))
      else:
        node_whitelist = None

      insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
                                           _ComputeNics(op, cluster, None,
                                                        self.cfg, ec_id),
                                           _ComputeFullBeParams(op, cluster),
                                           node_whitelist)
               for op in self.op.instances]

      req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
      ial = iallocator.IAllocator(self.cfg, self.rpc, req)

      ial.Run(self.op.iallocator)

      if not ial.success:
        raise errors.OpPrereqError("Can't compute nodes using"
                                   " iallocator '%s': %s" %
                                   (self.op.iallocator, ial.info),
                                   errors.ECODE_NORES)

      self.ia_result = ial.result

    if self.op.dry_run:
      self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
        constants.JOB_IDS_KEY: [],
        })

  def _ConstructPartialResult(self):
    """Constructs the partial result.

    """
    if self.op.iallocator:
      (allocatable, failed_insts) = self.ia_result
      allocatable_insts = map(compat.fst, allocatable)
    else:
      allocatable_insts = [op.instance_name for op in self.op.instances]
      failed_insts = []

    return {
      constants.ALLOCATABLE_KEY: allocatable_insts,
      constants.FAILED_KEY: failed_insts,
      }
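
  # Illustrative sketch (not part of the original file): assuming the
  # iallocator returned ([("inst1.example.com", ["node1"])],
  # ["inst2.example.com"]), the partial result built above would be
  #
  #   {
  #     constants.ALLOCATABLE_KEY: ["inst1.example.com"],
  #     constants.FAILED_KEY: ["inst2.example.com"],
  #   }
  #
  # i.e. plain instance names; the chosen nodes are applied to the opcodes
  # in Exec below.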

  def Exec(self, feedback_fn):
    """Executes the opcode.

    """
    jobs = []
    if self.op.iallocator:
      op2inst = dict((op.instance_name, op) for op in self.op.instances)
      (allocatable, failed) = self.ia_result

      for (name, node_names) in allocatable:
        op = op2inst.pop(name)

        (op.pnode_uuid, op.pnode) = \
          ExpandNodeUuidAndName(self.cfg, None, node_names[0])
        if len(node_names) > 1:
          (op.snode_uuid, op.snode) = \
            ExpandNodeUuidAndName(self.cfg, None, node_names[1])

        jobs.append([op])

      missing = set(op2inst.keys()) - set(failed)
      assert not missing, \
        "Iallocator did return incomplete result: %s" % \
        utils.CommaJoin(missing)
    else:
      jobs.extend([op] for op in self.op.instances)

    return ResultWithJobs(jobs, **self._ConstructPartialResult())


class _InstNicModPrivate:
  """Data structure for network interface modifications.

  Used by L{LUInstanceSetParams}.

  """
  def __init__(self):
    self.params = None
    self.filled = None


2038
  """Prepares a list of container modifications by adding a private data field.
2039

2040
  @type mods: list of tuples; (operation, index, parameters)
2041
  @param mods: List of modifications
2042
  @type private_fn: callable or None
2043
  @param private_fn: Callable for constructing a private data field for a
2044
    modification
2045
  @rtype: list
2046

2047
  """
2048
  if private_fn is None:
2049
    fn = lambda: None
2050
  else:
2051
    fn = private_fn
2052

    
2053
  return [(op, idx, params, fn()) for (op, idx, params) in mods]
2054

    
2055

    
2056
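
# Illustrative example (hypothetical values, not upstream code): with the
# private class above, _PrepareContainerMods pairs every modification with
# a freshly constructed private object:
#
#   mods = [(constants.DDM_MODIFY, 0, {"link": "br0"})]
#   _PrepareContainerMods(mods, _InstNicModPrivate)
#   # -> [(constants.DDM_MODIFY, 0, {"link": "br0"}, <_InstNicModPrivate>)]
#
# so each entry carries its own scratch space through the
# _ApplyContainerMods callbacks further down.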


def _CheckNodesPhysicalCPUs(lu, node_uuids, requested, hypervisor_specs):
  """Checks if nodes have enough physical CPUs

  This function checks if all given nodes have the needed number of
  physical CPUs. In case any node has fewer CPUs or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type node_uuids: C{list}
  @param node_uuids: the list of node UUIDs to check
  @type requested: C{int}
  @param requested: the minimum acceptable number of physical CPUs
  @type hypervisor_specs: list of pairs (string, dict of strings)
  @param hypervisor_specs: list of hypervisor specifications in
      pairs (hypervisor_name, hvparams)
  @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
      or we cannot check the node

  """
  nodeinfo = lu.rpc.call_node_info(node_uuids, None, hypervisor_specs)
  for node_uuid in node_uuids:
    info = nodeinfo[node_uuid]
    node_name = lu.cfg.GetNodeName(node_uuid)
    info.Raise("Cannot get current information from node %s" % node_name,
               prereq=True, ecode=errors.ECODE_ENVIRON)
    (_, _, (hv_info, )) = info.payload
    num_cpus = hv_info.get("cpu_total", None)
    if not isinstance(num_cpus, int):
      raise errors.OpPrereqError("Can't compute the number of physical CPUs"
                                 " on node %s, result was '%s'" %
                                 (node_name, num_cpus), errors.ECODE_ENVIRON)
    if requested > num_cpus:
      raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
                                 "required" % (node_name, num_cpus, requested),
                                 errors.ECODE_NORES)
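
# Minimal usage sketch (illustrative only; names and values are made up):
# callers pass one (hypervisor, hvparams) pair per hypervisor to query, e.g.
#
#   hv_specs = [(instance.hypervisor, cluster.hvparams[instance.hypervisor])]
#   _CheckNodesPhysicalCPUs(self, [pnode.uuid], requested_cpus, hv_specs)
#
# which raises OpPrereqError unless every listed node reports at least
# requested_cpus in the "cpu_total" field of its hypervisor info.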


def GetItemFromContainer(identifier, kind, container):
  """Return the item referred to by the identifier.

  @type identifier: string
  @param identifier: Item index or name or UUID
  @type kind: string
  @param kind: One-word item description
  @type container: list
  @param container: Container to get the item from

  """
  # Index
  try:
    idx = int(identifier)
    if idx == -1:
      # Append
      absidx = len(container) - 1
    elif idx < 0:
      raise IndexError("Not accepting negative indices other than -1")
    elif idx > len(container):
      raise IndexError("Got %s index %s, but there are only %s" %
                       (kind, idx, len(container)))
    else:
      absidx = idx
    return (absidx, container[idx])
  except ValueError:
    pass

  for idx, item in enumerate(container):
    if item.uuid == identifier or item.name == identifier:
      return (idx, item)

  raise errors.OpPrereqError("Cannot find %s with identifier %s" %
                             (kind, identifier), errors.ECODE_NOENT)
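
# Illustrative example (hypothetical container and names): each call below
# returns an (absolute_index, item) tuple for a list of NIC objects:
#
#   GetItemFromContainer("1", "nic", nics)           # by index
#   GetItemFromContainer("-1", "nic", nics)          # last item
#   GetItemFromContainer("eth-public", "nic", nics)  # by item.name
#   GetItemFromContainer(nics[0].uuid, "nic", nics)  # by item.uuid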


def _ApplyContainerMods(kind, container, chgdesc, mods,
                        create_fn, modify_fn, remove_fn):
  """Applies descriptions in C{mods} to C{container}.

  @type kind: string
  @param kind: One-word item description
  @type container: list
  @param container: Container to modify
  @type chgdesc: None or list
  @param chgdesc: List of applied changes
  @type mods: list
  @param mods: Modifications as returned by L{_PrepareContainerMods}
  @type create_fn: callable
  @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
    receives absolute item index, parameters and private data object as added
    by L{_PrepareContainerMods}, returns tuple containing new item and changes
    as list
  @type modify_fn: callable
  @param modify_fn: Callback for modifying an existing item
    (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
    and private data object as added by L{_PrepareContainerMods}, returns
    changes as list
  @type remove_fn: callable
  @param remove_fn: Callback on removing item; receives absolute item index,
    item and private data object as added by L{_PrepareContainerMods}

  """
  for (op, identifier, params, private) in mods:
    changes = None

    if op == constants.DDM_ADD:
      # Calculate where item will be added
      # When adding an item, identifier can only be an index
      try:
        idx = int(identifier)
      except ValueError:
        raise errors.OpPrereqError("Only positive integer or -1 is accepted as"
                                   " identifier for %s" % constants.DDM_ADD,
                                   errors.ECODE_INVAL)
      if idx == -1:
        addidx = len(container)
      else:
        if idx < 0:
          raise IndexError("Not accepting negative indices other than -1")
        elif idx > len(container):
          raise IndexError("Got %s index %s, but there are only %s" %
                           (kind, idx, len(container)))
        addidx = idx

      if create_fn is None:
        item = params
      else:
        (item, changes) = create_fn(addidx, params, private)

      if idx == -1:
        container.append(item)
      else:
        assert idx >= 0
        assert idx <= len(container)
        # list.insert does so before the specified index
        container.insert(idx, item)
    else:
      # Retrieve existing item
      (absidx, item) = GetItemFromContainer(identifier, kind, container)

      if op == constants.DDM_REMOVE:
        assert not params

        if remove_fn is not None:
          remove_fn(absidx, item, private)

        changes = [("%s/%s" % (kind, absidx), "remove")]

        assert container[absidx] == item
        del container[absidx]
      elif op == constants.DDM_MODIFY:
        if modify_fn is not None:
          changes = modify_fn(absidx, item, params, private)
      else:
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)

    assert _TApplyContModsCbChanges(changes)

    if not (chgdesc is None or changes is None):
      chgdesc.extend(changes)
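
# Rough sketch of the flow (illustrative; the callbacks are hypothetical):
# applying a remove followed by an add to a list of NICs could look like
#
#   chgdesc = []
#   mods = _PrepareContainerMods(
#     [(constants.DDM_REMOVE, "0", {}),
#      (constants.DDM_ADD, "-1", {constants.INIC_MAC: constants.VALUE_AUTO})],
#     None)
#   _ApplyContainerMods("nic", nics, chgdesc, mods,
#                       create_nic_fn, modify_nic_fn, remove_nic_fn)
#
# after which nics has been updated in place and chgdesc holds entries such
# as ("nic/0", "remove") plus whatever create_nic_fn reported.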


def _UpdateIvNames(base_index, disks):
  """Updates the C{iv_name} attribute of disks.

  @type disks: list of L{objects.Disk}

  """
  for (idx, disk) in enumerate(disks):
    disk.iv_name = "disk/%s" % (base_index + idx, )
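
# For example (illustrative): _UpdateIvNames(2, disks) renames a three-disk
# list to "disk/2", "disk/3" and "disk/4", keeping iv_name consistent with
# each disk's final position after adds or removes elsewhere in the list.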


class LUInstanceSetParams(LogicalUnit):
  """Modifies an instance's parameters.

  """
  HPATH = "instance-modify"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  @staticmethod
  def _UpgradeDiskNicMods(kind, mods, verify_fn):
    assert ht.TList(mods)
    assert not mods or len(mods[0]) in (2, 3)

    if mods and len(mods[0]) == 2:
      result = []

      addremove = 0
      for op, params in mods:
        if op in (constants.DDM_ADD, constants.DDM_REMOVE):
          result.append((op, -1, params))
          addremove += 1

          if addremove > 1:
            raise errors.OpPrereqError("Only one %s add or remove operation is"
                                       " supported at a time" % kind,
                                       errors.ECODE_INVAL)
        else:
          result.append((constants.DDM_MODIFY, op, params))

      assert verify_fn(result)
    else:
      result = mods

    return result
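
  # Illustrative example (not upstream code): legacy two-element mods are
  # upgraded to the three-element form used everywhere else, e.g.
  #
  #   [("add", {"size": 1024})] -> [(constants.DDM_ADD, -1, {"size": 1024})]
  #   [(0, {"link": "br0"})]    -> [(constants.DDM_MODIFY, 0, {"link": "br0"})]
  #
  # i.e. adds and removes target the end of the container, and anything
  # else is treated as a modify of the given index or name.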

  @staticmethod
  def _CheckMods(kind, mods, key_types, item_fn):
    """Ensures requested disk/NIC modifications are valid.

    """
    for (op, _, params) in mods:
      assert ht.TDict(params)

      # If 'key_types' is an empty dict, we assume we have an
      # 'ext' template and thus do not ForceDictType
      if key_types:
        utils.ForceDictType(params, key_types)

      if op == constants.DDM_REMOVE:
        if params:
          raise errors.OpPrereqError("No settings should be passed when"
                                     " removing a %s" % kind,
                                     errors.ECODE_INVAL)
      elif op in (constants.DDM_ADD, constants.DDM_MODIFY):
        item_fn(op, params)
      else:
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)

  @staticmethod
  def _VerifyDiskModification(op, params, excl_stor):
    """Verifies a disk modification.

    """
    if op == constants.DDM_ADD:
      mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
      if mode not in constants.DISK_ACCESS_SET:
        raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
                                   errors.ECODE_INVAL)

      size = params.get(constants.IDISK_SIZE, None)
      if size is None:
        raise errors.OpPrereqError("Required disk parameter '%s' missing" %
                                   constants.IDISK_SIZE, errors.ECODE_INVAL)

      try:
        size = int(size)
      except (TypeError, ValueError), err:
        raise errors.OpPrereqError("Invalid disk size parameter: %s" % err,
                                   errors.ECODE_INVAL)

      params[constants.IDISK_SIZE] = size
      name = params.get(constants.IDISK_NAME, None)
      if name is not None and name.lower() == constants.VALUE_NONE:
        params[constants.IDISK_NAME] = None

      CheckSpindlesExclusiveStorage(params, excl_stor, True)

    elif op == constants.DDM_MODIFY:
      if constants.IDISK_SIZE in params:
        raise errors.OpPrereqError("Disk size change not possible, use"
                                   " grow-disk", errors.ECODE_INVAL)
      if len(params) > 2:
        raise errors.OpPrereqError("Disk modification doesn't support"
                                   " additional arbitrary parameters",
                                   errors.ECODE_INVAL)
      name = params.get(constants.IDISK_NAME, None)
      if name is not None and name.lower() == constants.VALUE_NONE:
        params[constants.IDISK_NAME] = None

  @staticmethod
  def _VerifyNicModification(op, params):
    """Verifies a network interface modification.

    """
    if op in (constants.DDM_ADD, constants.DDM_MODIFY):
      ip = params.get(constants.INIC_IP, None)
      name = params.get(constants.INIC_NAME, None)
      req_net = params.get(constants.INIC_NETWORK, None)
      link = params.get(constants.NIC_LINK, None)
      mode = params.get(constants.NIC_MODE, None)
      if name is not None and name.lower() == constants.VALUE_NONE:
        params[constants.INIC_NAME] = None
      if req_net is not None:
        if req_net.lower() == constants.VALUE_NONE:
          params[constants.INIC_NETWORK] = None
          req_net = None
        elif link is not None or mode is not None:
          raise errors.OpPrereqError("If network is given, mode or link"
                                     " should not be specified",
                                     errors.ECODE_INVAL)

      if op == constants.DDM_ADD:
        macaddr = params.get(constants.INIC_MAC, None)
        if macaddr is None:
          params[constants.INIC_MAC] = constants.VALUE_AUTO

      if ip is not None:
        if ip.lower() == constants.VALUE_NONE:
          params[constants.INIC_IP] = None
        else:
          if ip.lower() == constants.NIC_IP_POOL:
            if op == constants.DDM_ADD and req_net is None:
              raise errors.OpPrereqError("If ip=pool, parameter network"
                                         " cannot be none",
                                         errors.ECODE_INVAL)
          else:
            if not netutils.IPAddress.IsValid(ip):
              raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                         errors.ECODE_INVAL)

      if constants.INIC_MAC in params:
        macaddr = params[constants.INIC_MAC]
        if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
          macaddr = utils.NormalizeAndValidateMac(macaddr)

        if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
                                     " modifying an existing NIC",
                                     errors.ECODE_INVAL)
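
  # Normalization sketch (illustrative values): for an add such as
  #   params = {constants.INIC_NAME: "none", constants.INIC_IP: "192.0.2.10"}
  # the verifier above rewrites the "none" name to None, validates the IP
  # syntax and defaults the MAC to constants.VALUE_AUTO; ip="pool" is only
  # accepted together with a network, and mac="auto" is rejected on modify.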

  def CheckArguments(self):
    if not (self.op.nics or self.op.disks or self.op.disk_template or
            self.op.hvparams or self.op.beparams or self.op.os_name or
            self.op.osparams or self.op.offline is not None or
            self.op.runtime_mem or self.op.pnode):
      raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)

    if self.op.hvparams:
      CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
                           "hypervisor", "instance", "cluster")

    self.op.disks = self._UpgradeDiskNicMods(
      "disk", self.op.disks, ht.TSetParamsMods(ht.TIDiskParams))
    self.op.nics = self._UpgradeDiskNicMods(
      "NIC", self.op.nics, ht.TSetParamsMods(ht.TINicParams))

    if self.op.disks and self.op.disk_template is not None:
      raise errors.OpPrereqError("Disk template conversion and other disk"
                                 " changes not supported at the same time",
                                 errors.ECODE_INVAL)

    if (self.op.disk_template and
        self.op.disk_template in constants.DTS_INT_MIRROR and
        self.op.remote_node is None):
      raise errors.OpPrereqError("Changing the disk template to a mirrored"
                                 " one requires specifying a secondary node",
                                 errors.ECODE_INVAL)

    # Check NIC modifications
    self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
                    self._VerifyNicModification)

    if self.op.pnode:
      (self.op.pnode_uuid, self.op.pnode) = \
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODEGROUP] = []
    # Can't even acquire node locks in shared mode as upcoming changes in
    # Ganeti 2.6 will start to modify the node object on disk conversion
    self.needed_locks[locking.LEVEL_NODE] = []
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
    # Share the node group lock, as it is only needed to look up the ipolicy
    self.share_locks[locking.LEVEL_NODEGROUP] = 1

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
      # Acquire locks for the instance's nodegroups optimistically. Needs
      # to be verified in CheckPrereq
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
        self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
    elif level == locking.LEVEL_NODE:
      self._LockInstancesNodes()
      if self.op.disk_template and self.op.remote_node:
        (self.op.remote_node_uuid, self.op.remote_node) = \
          ExpandNodeUuidAndName(self.cfg, self.op.remote_node_uuid,
                                self.op.remote_node)
        self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node_uuid)
    elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, primary and secondaries.

    """
    args = {}
    if constants.BE_MINMEM in self.be_new:
      args["minmem"] = self.be_new[constants.BE_MINMEM]
    if constants.BE_MAXMEM in self.be_new:
      args["maxmem"] = self.be_new[constants.BE_MAXMEM]
    if constants.BE_VCPUS in self.be_new:
      args["vcpus"] = self.be_new[constants.BE_VCPUS]
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
    # information at all.

    if self._new_nics is not None:
      nics = []

      for nic in self._new_nics:
        n = copy.deepcopy(nic)
        nicparams = self.cluster.SimpleFillNIC(n.nicparams)
        n.nicparams = nicparams
        nics.append(NICToTuple(self, n))

      args["nics"] = nics

    env = BuildInstanceHookEnvByObject(self, self.instance, override=args)
    if self.op.disk_template:
      env["NEW_DISK_TEMPLATE"] = self.op.disk_template
    if self.op.runtime_mem:
      env["RUNTIME_MEMORY"] = self.op.runtime_mem

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)
  def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
2487
                              old_params, cluster, pnode_uuid):

    update_params_dict = dict([(key, params[key])
                               for key in constants.NICS_PARAMETERS
                               if key in params])

    req_link = update_params_dict.get(constants.NIC_LINK, None)
    req_mode = update_params_dict.get(constants.NIC_MODE, None)

    new_net_uuid = None
    new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid)
    if new_net_uuid_or_name:
      new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name)
      new_net_obj = self.cfg.GetNetwork(new_net_uuid)

    if old_net_uuid:
      old_net_obj = self.cfg.GetNetwork(old_net_uuid)

    if new_net_uuid:
      netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode_uuid)
      if not netparams:
        raise errors.OpPrereqError("No netparams found for the network"
                                   " %s, probably not connected" %
                                   new_net_obj.name, errors.ECODE_INVAL)
      new_params = dict(netparams)
    else:
      new_params = GetUpdatedParams(old_params, update_params_dict)

    utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)

    new_filled_params = cluster.SimpleFillNIC(new_params)
    objects.NIC.CheckParameterSyntax(new_filled_params)

    new_mode = new_filled_params[constants.NIC_MODE]
    if new_mode == constants.NIC_MODE_BRIDGED:
      bridge = new_filled_params[constants.NIC_LINK]
      msg = self.rpc.call_bridges_exist(pnode_uuid, [bridge]).fail_msg
      if msg:
        msg = "Error checking bridges on node '%s': %s" % \
                (self.cfg.GetNodeName(pnode_uuid), msg)
        if self.op.force:
          self.warn.append(msg)
        else:
          raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)

    elif new_mode == constants.NIC_MODE_ROUTED:
      ip = params.get(constants.INIC_IP, old_ip)
      if ip is None:
        raise errors.OpPrereqError("Cannot set the NIC IP address to None"
                                   " on a routed NIC", errors.ECODE_INVAL)

    elif new_mode == constants.NIC_MODE_OVS:
      # TODO: check OVS link
      self.LogInfo("OVS links are currently not checked for correctness")

    if constants.INIC_MAC in params:
      mac = params[constants.INIC_MAC]
      if mac is None:
        raise errors.OpPrereqError("Cannot unset the NIC MAC address",
                                   errors.ECODE_INVAL)
      elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        # otherwise generate the MAC address
        params[constants.INIC_MAC] = \
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
      else:
        # or validate/reserve the current one
        try:
          self.cfg.ReserveMAC(mac, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("MAC address '%s' already in use"
                                     " in cluster" % mac,
                                     errors.ECODE_NOTUNIQUE)
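    # When the NIC changes networks, the MAC prefix of the new network
    # may differ from the old one, in which case a fresh MAC has to be
    # generated below to fit the new network.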
    elif new_net_uuid != old_net_uuid:

      def get_net_prefix(net_uuid):
        mac_prefix = None
        if net_uuid:
          nobj = self.cfg.GetNetwork(net_uuid)
          mac_prefix = nobj.mac_prefix

        return mac_prefix

      new_prefix = get_net_prefix(new_net_uuid)
      old_prefix = get_net_prefix(old_net_uuid)
      if old_prefix != new_prefix:
        params[constants.INIC_MAC] = \
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())

    # if there is a change in (ip, network) tuple
    new_ip = params.get(constants.INIC_IP, old_ip)
    if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
      if new_ip:
        # if IP is pool then require a network and generate one IP
        if new_ip.lower() == constants.NIC_IP_POOL:
          if new_net_uuid:
            try:
              new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("Unable to get a free IP"
                                         " from the address pool",
                                         errors.ECODE_STATE)
            self.LogInfo("Chose IP %s from network %s",
                         new_ip,
                         new_net_obj.name)
            params[constants.INIC_IP] = new_ip
          else:
            raise errors.OpPrereqError("ip=pool, but no network found",
                                       errors.ECODE_INVAL)
        # otherwise reserve the new IP in the new network, if any
        elif new_net_uuid:
          try:
            self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId())
            self.LogInfo("Reserving IP %s in network %s",
                         new_ip, new_net_obj.name)
          except errors.ReservationError:
            raise errors.OpPrereqError("IP %s not available in network %s" %
                                       (new_ip, new_net_obj.name),
                                       errors.ECODE_NOTUNIQUE)
        # new network is None so check if new IP is a conflicting IP
        elif self.op.conflicts_check:
          _CheckForConflictingIp(self, new_ip, pnode_uuid)

      # release old IP if old network is not None
      if old_ip and old_net_uuid:
        try:
          self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId())
        except errors.AddressPoolError:
          logging.warning("Release IP %s not contained in network %s",
                          old_ip, old_net_obj.name)

    # there are no changes in (ip, network) tuple and old network is not None
    elif (old_net_uuid is not None and
          (req_link is not None or req_mode is not None)):
      raise errors.OpPrereqError("Not allowed to change link or mode of"
                                 " a NIC that is connected to a network",
                                 errors.ECODE_INVAL)

    private.params = new_params
    private.filled = new_filled_params

  def _PreCheckDiskTemplate(self, pnode_info):
    """CheckPrereq checks related to a new disk template."""
    # Arguments are passed to avoid configuration lookups
    pnode_uuid = self.instance.primary_node
    if self.instance.disk_template == self.op.disk_template:
      raise errors.OpPrereqError("Instance already has disk template %s" %
                                 self.instance.disk_template,
                                 errors.ECODE_INVAL)

    if not self.cluster.IsDiskTemplateEnabled(self.op.disk_template):
      raise errors.OpPrereqError("Disk template '%s' is not enabled for this"
                                 " cluster" % self.op.disk_template,
                                 errors.ECODE_STATE)

    if (self.instance.disk_template,
        self.op.disk_template) not in self._DISK_CONVERSIONS:
      raise errors.OpPrereqError("Unsupported disk template conversion from"
                                 " %s to %s" % (self.instance.disk_template,
                                                self.op.disk_template),
                                 errors.ECODE_INVAL)
    CheckInstanceState(self, self.instance, INSTANCE_DOWN,
                       msg="cannot change disk template")
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.remote_node_uuid == pnode_uuid:
        raise errors.OpPrereqError("Given new secondary node %s is the same"
                                   " as the primary node of the instance" %
                                   self.op.remote_node, errors.ECODE_STATE)
      CheckNodeOnline(self, self.op.remote_node_uuid)
      CheckNodeNotDrained(self, self.op.remote_node_uuid)
      # FIXME: here we assume that the old instance type is DT_PLAIN
      assert self.instance.disk_template == constants.DT_PLAIN
      disks = [{constants.IDISK_SIZE: d.size,
                constants.IDISK_VG: d.logical_id[0]}
               for d in self.instance.disks]
      required = ComputeDiskSizePerVG(self.op.disk_template, disks)
      CheckNodesFreeDiskPerVG(self, [self.op.remote_node_uuid], required)

      snode_info = self.cfg.GetNodeInfo(self.op.remote_node_uuid)
      snode_group = self.cfg.GetNodeGroup(snode_info.group)
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
                                                              snode_group)
      CheckTargetNodeIPolicy(self, ipolicy, self.instance, snode_info, self.cfg,
                             ignore=self.op.ignore_ipolicy)
      if pnode_info.group != snode_info.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    if self.op.disk_template not in constants.DTS_EXCL_STORAGE:
      # Make sure none of the nodes require exclusive storage
      nodes = [pnode_info]
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        assert snode_info
        nodes.append(snode_info)
      has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
      if compat.any(map(has_es, nodes)):
        errmsg = ("Cannot convert disk template from %s to %s when exclusive"
                  " storage is enabled" % (self.instance.disk_template,
                                           self.op.disk_template))
        raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)

  def _PreCheckDisks(self, ispec):
    """CheckPrereq checks related to disk changes.

    @type ispec: dict
    @param ispec: instance specs to be updated with the new disks

    """
    self.diskparams = self.cfg.GetInstanceDiskParams(self.instance)

    excl_stor = compat.any(
      rpc.GetExclusiveStorageForNodes(self.cfg,
                                      self.instance.all_nodes).values()
      )

    # Check disk modifications. This is done here and not in CheckArguments
    # (as with NICs), because we need to know the instance's disk template
    ver_fn = lambda op, par: self._VerifyDiskModification(op, par, excl_stor)
    if self.instance.disk_template == constants.DT_EXT:
      self._CheckMods("disk", self.op.disks, {}, ver_fn)
    else:
      self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
                      ver_fn)

    self.diskmod = _PrepareContainerMods(self.op.disks, None)

    # Check the validity of the `provider' parameter
    if self.instance.disk_template == constants.DT_EXT:
      for mod in self.diskmod:
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
        if mod[0] == constants.DDM_ADD:
          if ext_provider is None:
            raise errors.OpPrereqError("Instance template is '%s' and parameter"
                                       " '%s' is missing during disk add" %
                                       (constants.DT_EXT,
                                        constants.IDISK_PROVIDER),
                                       errors.ECODE_NOENT)
        elif mod[0] == constants.DDM_MODIFY:
          if ext_provider:
            raise errors.OpPrereqError("Parameter '%s' is invalid during disk"
                                       " modification" %
                                       constants.IDISK_PROVIDER,
                                       errors.ECODE_INVAL)
    else:
      for mod in self.diskmod:
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
        if ext_provider is not None:
          raise errors.OpPrereqError("Parameter '%s' is only valid for"
                                     " instances of type '%s'" %
                                     (constants.IDISK_PROVIDER,
                                      constants.DT_EXT),
                                     errors.ECODE_INVAL)

    if self.op.disks and self.instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Disk operations not supported for"
                                 " diskless instances", errors.ECODE_INVAL)

    def _PrepareDiskMod(_, disk, params, __):
      disk.name = params.get(constants.IDISK_NAME, None)

    # Verify disk changes (operating on a copy)
    disks = copy.deepcopy(self.instance.disks)
    _ApplyContainerMods("disk", disks, None, self.diskmod, None,
                        _PrepareDiskMod, None)
    utils.ValidateDeviceNames("disk", disks)
    if len(disks) > constants.MAX_DISKS:
      raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
                                 " more" % constants.MAX_DISKS,
                                 errors.ECODE_STATE)
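    # Record the post-modification disk count and sizes in ispec, so the
    # instance policy checks in CheckPrereq run against the final state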
    disk_sizes = [disk.size for disk in self.instance.disks]
    disk_sizes.extend(params["size"] for (op, idx, params, private) in
                      self.diskmod if op == constants.DDM_ADD)
    ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
    ispec[constants.ISPEC_DISK_SIZE] = disk_sizes

    if self.op.offline is not None and self.op.offline:
      CheckInstanceState(self, self.instance, CAN_CHANGE_INSTANCE_OFFLINE,
                         msg="can't change to offline")

  def CheckPrereq(self):
    """Check prerequisites.

    This checks the instance's current state and validates all requested
    parameter changes before any of them is applied.

    """
    assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    self.cluster = self.cfg.GetClusterInfo()

    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    pnode_uuid = self.instance.primary_node

    self.warn = []

    if (self.op.pnode_uuid is not None and self.op.pnode_uuid != pnode_uuid and
        not self.op.force):
      # verify that the instance is not up
      instance_info = self.rpc.call_instance_info(
          pnode_uuid, self.instance.name, self.instance.hypervisor,
          self.instance.hvparams)
      if instance_info.fail_msg:
        self.warn.append("Can't get instance runtime information: %s" %
                         instance_info.fail_msg)
      elif instance_info.payload:
        raise errors.OpPrereqError("Instance is still running on %s" %
                                   self.cfg.GetNodeName(pnode_uuid),
                                   errors.ECODE_STATE)

    assert pnode_uuid in self.owned_locks(locking.LEVEL_NODE)
    node_uuids = list(self.instance.all_nodes)
    pnode_info = self.cfg.GetNodeInfo(pnode_uuid)

    #_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
    assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
    group_info = self.cfg.GetNodeGroup(pnode_info.group)

    # dictionary with instance information after the modification
    ispec = {}

    # Prepare NIC modifications
    self.nicmod = _PrepareContainerMods(self.op.nics, _InstNicModPrivate)

    # OS change
    if self.op.os_name and not self.op.force:
      CheckNodeHasOS(self, self.instance.primary_node, self.op.os_name,
                     self.op.force_variant)
      instance_os = self.op.os_name
    else:
      instance_os = self.instance.os

    assert not (self.op.disk_template and self.op.disks), \
      "Can't modify disk template and apply disk changes at the same time"

    if self.op.disk_template:
      self._PreCheckDiskTemplate(pnode_info)

    self._PreCheckDisks(ispec)

    # hvparams processing
    if self.op.hvparams:
      hv_type = self.instance.hypervisor
      i_hvdict = GetUpdatedParams(self.instance.hvparams, self.op.hvparams)
      utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
      hv_new = self.cluster.SimpleFillHV(hv_type, self.instance.os, i_hvdict)

      # local check
      hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
      CheckHVParams(self, node_uuids, self.instance.hypervisor, hv_new)
      self.hv_proposed = self.hv_new = hv_new # the new actual values
      self.hv_inst = i_hvdict # the new dict (without defaults)
    else:
      self.hv_proposed = self.cluster.SimpleFillHV(self.instance.hypervisor,
                                                   self.instance.os,
                                                   self.instance.hvparams)
      self.hv_new = self.hv_inst = {}

    # beparams processing
    if self.op.beparams:
      i_bedict = GetUpdatedParams(self.instance.beparams, self.op.beparams,
                                  use_none=True)
      objects.UpgradeBeParams(i_bedict)
      utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
      be_new = self.cluster.SimpleFillBE(i_bedict)
      self.be_proposed = self.be_new = be_new # the new actual values
      self.be_inst = i_bedict # the new dict (without defaults)
    else:
      self.be_new = self.be_inst = {}
      self.be_proposed = self.cluster.SimpleFillBE(self.instance.beparams)
    be_old = self.cluster.FillBE(self.instance)

    # CPU param validation -- checking every time a parameter is
    # changed to cover all cases where either CPU mask or vcpus have
    # changed
    if (constants.BE_VCPUS in self.be_proposed and
        constants.HV_CPU_MASK in self.hv_proposed):
      cpu_list = \
        utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
      # Verify mask is consistent with number of vCPUs. Can skip this
      # test if only 1 entry in the CPU mask, which means same mask
      # is applied to all vCPUs.
      if (len(cpu_list) > 1 and
          len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
        raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
                                   " CPU mask [%s]" %
                                   (self.be_proposed[constants.BE_VCPUS],
                                    self.hv_proposed[constants.HV_CPU_MASK]),
                                   errors.ECODE_INVAL)

      # Only perform this test if a new CPU mask is given
      if constants.HV_CPU_MASK in self.hv_new:
        # Calculate the largest CPU number requested
        max_requested_cpu = max(map(max, cpu_list))
        # Check that all of the instance's nodes have enough physical CPUs to
        # satisfy the requested CPU mask
        hvspecs = [(self.instance.hypervisor,
                    self.cfg.GetClusterInfo()
                      .hvparams[self.instance.hypervisor])]
        _CheckNodesPhysicalCPUs(self, self.instance.all_nodes,
                                max_requested_cpu + 1,
                                hvspecs)

    # osparams processing
    if self.op.osparams:
      i_osdict = GetUpdatedParams(self.instance.osparams, self.op.osparams)
      CheckOSParams(self, True, node_uuids, instance_os, i_osdict)
      self.os_inst = i_osdict # the new dict (without defaults)
    else:
      self.os_inst = {}

    #TODO(dynmem): do the appropriate check involving MINMEM
    if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
        be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
      mem_check_list = [pnode_uuid]
      if be_new[constants.BE_AUTO_BALANCE]:
        # either we changed auto_balance to yes or it was from before
        mem_check_list.extend(self.instance.secondary_nodes)
      instance_info = self.rpc.call_instance_info(
          pnode_uuid, self.instance.name, self.instance.hypervisor,
          self.instance.hvparams)
      hvspecs = [(self.instance.hypervisor,
                  self.cluster.hvparams[self.instance.hypervisor])]
      nodeinfo = self.rpc.call_node_info(mem_check_list, None,
                                         hvspecs)
      pninfo = nodeinfo[pnode_uuid]
      msg = pninfo.fail_msg
      if msg:
        # Assume the primary node is unreachable and go ahead
        self.warn.append("Can't get info from primary node %s: %s" %
                         (self.cfg.GetNodeName(pnode_uuid), msg))
      else:
        (_, _, (pnhvinfo, )) = pninfo.payload
        if not isinstance(pnhvinfo.get("memory_free", None), int):
          self.warn.append("Node data from primary node %s doesn't contain"
                           " free memory information" %
                           self.cfg.GetNodeName(pnode_uuid))
        elif instance_info.fail_msg:
          self.warn.append("Can't get instance runtime information: %s" %
                           instance_info.fail_msg)
        else:
          if instance_info.payload:
            current_mem = int(instance_info.payload["memory"])
          else:
            # Assume instance not running
            # (there is a slight race condition here, but it's not very
            # probable, and we have no other way to check)
            # TODO: Describe race condition
            current_mem = 0
          #TODO(dynmem): do the appropriate check involving MINMEM
          miss_mem = (be_new[constants.BE_MAXMEM] - current_mem -
                      pnhvinfo["memory_free"])
          if miss_mem > 0:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from starting, due to %d MB of memory"
                                       " missing on its primary node" %
                                       miss_mem, errors.ECODE_NORES)

      if be_new[constants.BE_AUTO_BALANCE]:
        for node_uuid, nres in nodeinfo.items():
          if node_uuid not in self.instance.secondary_nodes:
            continue
          nres.Raise("Can't get info from secondary node %s" %
                     self.cfg.GetNodeName(node_uuid), prereq=True,
                     ecode=errors.ECODE_STATE)
          (_, _, (nhvinfo, )) = nres.payload
          if not isinstance(nhvinfo.get("memory_free", None), int):
            raise errors.OpPrereqError("Secondary node %s didn't return free"
                                       " memory information" %
                                       self.cfg.GetNodeName(node_uuid),
                                       errors.ECODE_STATE)
          #TODO(dynmem): do the appropriate check involving MINMEM
          elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from failing over to its secondary"
                                       " node %s, due to not enough memory" %
                                       self.cfg.GetNodeName(node_uuid),
                                       errors.ECODE_STATE)
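
    # Runtime memory (ballooning) changes require a running instance;
    # growing the memory additionally needs enough free memory on the
    # primary node, which is verified below.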
    if self.op.runtime_mem:
      remote_info = self.rpc.call_instance_info(
         self.instance.primary_node, self.instance.name,
         self.instance.hypervisor,
         self.cluster.hvparams[self.instance.hypervisor])
      remote_info.Raise("Error checking node %s" %
                        self.cfg.GetNodeName(self.instance.primary_node))
      if not remote_info.payload: # not running already
        raise errors.OpPrereqError("Instance %s is not running" %
                                   self.instance.name, errors.ECODE_STATE)

      current_memory = remote_info.payload["memory"]
      if (not self.op.force and
           (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
            self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
        raise errors.OpPrereqError("Instance %s must have memory between %d"
                                   " and %d MB unless --force is given" %
                                   (self.instance.name,
                                    self.be_proposed[constants.BE_MINMEM],
                                    self.be_proposed[constants.BE_MAXMEM]),
                                   errors.ECODE_INVAL)

      delta = self.op.runtime_mem - current_memory
      if delta > 0:
        CheckNodeFreeMemory(
            self, self.instance.primary_node,
            "ballooning memory for instance %s" % self.instance.name, delta,
            self.instance.hypervisor,
            self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])

    # make self.cluster visible in the functions below
    cluster = self.cluster
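
    # Callbacks for the NIC verification pass of _ApplyContainerMods
    # below; they only differ in the "old" NIC state that is passed to
    # _PrepareNicModification.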
    def _PrepareNicCreate(_, params, private):
      self._PrepareNicModification(params, private, None, None,
                                   {}, cluster, pnode_uuid)
      return (None, None)

    def _PrepareNicMod(_, nic, params, private):
      self._PrepareNicModification(params, private, nic.ip, nic.network,
                                   nic.nicparams, cluster, pnode_uuid)
      return None

    def _PrepareNicRemove(_, params, __):
      ip = params.ip
      net = params.network
      if net is not None and ip is not None:
        self.cfg.ReleaseIp(net, ip, self.proc.GetECId())

    # Verify NIC changes (operating on copy)
    nics = self.instance.nics[:]
    _ApplyContainerMods("NIC", nics, None, self.nicmod,
                        _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
    if len(nics) > constants.MAX_NICS:
      raise errors.OpPrereqError("Instance has too many network interfaces"
                                 " (%d), cannot add more" % constants.MAX_NICS,
                                 errors.ECODE_STATE)

    # Pre-compute NIC changes (necessary to use result in hooks)
    self._nic_chgdesc = []
    if self.nicmod:
      # Operate on copies as this is still in prereq
      nics = [nic.Copy() for nic in self.instance.nics]
      _ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
                          self._CreateNewNic, self._ApplyNicMods, None)
      # Verify that NIC names are unique and valid
      utils.ValidateDeviceNames("NIC", nics)
      self._new_nics = nics
      ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
    else:
      self._new_nics = None
      ispec[constants.ISPEC_NIC_COUNT] = len(self.instance.nics)

    if not self.op.ignore_ipolicy:
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
                                                              group_info)

      # Fill ispec with backend parameters
      ispec[constants.ISPEC_SPINDLE_USE] = \
        self.be_new.get(constants.BE_SPINDLE_USE, None)
      ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS,
                                                         None)

      # Copy ispec to verify parameters with min/max values separately
      if self.op.disk_template:
        new_disk_template = self.op.disk_template
      else:
        new_disk_template = self.instance.disk_template
      ispec_max = ispec.copy()
      ispec_max[constants.ISPEC_MEM_SIZE] = \
        self.be_new.get(constants.BE_MAXMEM, None)
      res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,
                                                     new_disk_template)
      ispec_min = ispec.copy()
      ispec_min[constants.ISPEC_MEM_SIZE] = \
        self.be_new.get(constants.BE_MINMEM, None)
      res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,
                                                     new_disk_template)

      if (res_max or res_min):
        # FIXME: Improve error message by including information about whether
        # the upper or lower limit of the parameter fails the ipolicy.
        msg = ("Instance allocation to group %s (%s) violates policy: %s" %
               (group_info, group_info.name,
                utils.CommaJoin(set(res_max + res_min))))
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

  def _ConvertPlainToDrbd(self, feedback_fn):
    """Converts an instance from plain to drbd.

    """
    feedback_fn("Converting template to drbd")
    pnode_uuid = self.instance.primary_node
    snode_uuid = self.op.remote_node_uuid

    assert self.instance.disk_template == constants.DT_PLAIN

    # create a fake disk info for _GenerateDiskTemplate
    disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
                  constants.IDISK_VG: d.logical_id[0],
                  constants.IDISK_NAME: d.name}
                 for d in self.instance.disks]
    new_disks = GenerateDiskTemplate(self, self.op.disk_template,
                                     self.instance.uuid, pnode_uuid,
                                     [snode_uuid], disk_info, None, None, 0,
                                     feedback_fn, self.diskparams)
    anno_disks = rpc.AnnotateDiskParams(constants.DT_DRBD8, new_disks,
                                        self.diskparams)
    p_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, pnode_uuid)
    s_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, snode_uuid)
    info = GetInstanceInfoText(self.instance)
    feedback_fn("Creating additional volumes...")
    # first, create the missing data and meta devices
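    # (on the primary node only the meta device is missing, since the
    # existing LV is renamed below to become the DRBD data device; the
    # new secondary node needs both data and meta devices created)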
    for disk in anno_disks:
      # unfortunately this is... not too nice
      CreateSingleBlockDev(self, pnode_uuid, self.instance, disk.children[1],
                           info, True, p_excl_stor)
      for child in disk.children:
        CreateSingleBlockDev(self, snode_uuid, self.instance, child, info, True,
                             s_excl_stor)
    # at this stage, all new LVs have been created, we can rename the
    # old ones
    feedback_fn("Renaming original volumes...")
    rename_list = [(o, n.children[0].logical_id)
                   for (o, n) in zip(self.instance.disks, new_disks)]
    result = self.rpc.call_blockdev_rename(pnode_uuid, rename_list)
    result.Raise("Failed to rename original LVs")

    feedback_fn("Initializing DRBD devices...")
    # all child devices are in place, we can now create the DRBD devices
    try:
      for disk in anno_disks:
        for (node_uuid, excl_stor) in [(pnode_uuid, p_excl_stor),
                                       (snode_uuid, s_excl_stor)]:
          f_create = node_uuid == pnode_uuid
          CreateSingleBlockDev(self, node_uuid, self.instance, disk, info,
                               f_create, excl_stor)
    except errors.GenericError, e:
      feedback_fn("Initializing of DRBD devices failed;"
                  " renaming back original volumes...")
      for disk in new_disks:
        self.cfg.SetDiskID(disk, pnode_uuid)
      rename_back_list = [(n.children[0], o.logical_id)
                          for (n, o) in zip(new_disks, self.instance.disks)]
      result = self.rpc.call_blockdev_rename(pnode_uuid, rename_back_list)
      result.Raise("Failed to rename LVs back after error %s" % str(e))
      raise

    # at this point, the instance has been modified
    self.instance.disk_template = constants.DT_DRBD8
    self.instance.disks = new_disks
    self.cfg.Update(self.instance, feedback_fn)

    # Release node locks while waiting for sync
    ReleaseLocks(self, locking.LEVEL_NODE)

    # disks are created, waiting for sync
    disk_abort = not WaitForSync(self, self.instance,
                                 oneshot=not self.op.wait_for_sync)
    if disk_abort:
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance, please cleanup manually")

    # Node resource locks will be released by caller

  def _ConvertDrbdToPlain(self, feedback_fn):
    """Converts an instance from drbd to plain.

    """
    assert len(self.instance.secondary_nodes) == 1
    assert self.instance.disk_template == constants.DT_DRBD8

    pnode_uuid = self.instance.primary_node
    snode_uuid = self.instance.secondary_nodes[0]
    feedback_fn("Converting template to plain")

    old_disks = AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
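    # the first child of a DRBD8 disk is its data LV, which becomes the
    # plain disk after the conversion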
    new_disks = [d.children[0] for d in self.instance.disks]

    # copy over size, mode and name
    for parent, child in zip(old_disks, new_disks):
      child.size = parent.size
      child.mode = parent.mode
      child.name = parent.name

    # these are DRBD disks, return their ports to the pool
    # NOTE: this must be done right before the call to cfg.Update!
    for disk in old_disks:
      tcp_port = disk.logical_id[2]
      self.cfg.AddTcpUdpPort(tcp_port)

    # update instance structure
    self.instance.disks = new_disks
    self.instance.disk_template = constants.DT_PLAIN
    _UpdateIvNames(0, self.instance.disks)
    self.cfg.Update(self.instance, feedback_fn)

    # Release locks in case removing disks takes a while
    ReleaseLocks(self, locking.LEVEL_NODE)

    feedback_fn("Removing volumes on the secondary node...")
    for disk in old_disks:
      self.cfg.SetDiskID(disk, snode_uuid)
      msg = self.rpc.call_blockdev_remove(snode_uuid, disk).fail_msg
      if msg:
        self.LogWarning("Could not remove block device %s on node %s,"
                        " continuing anyway: %s", disk.iv_name,
                        self.cfg.GetNodeName(snode_uuid), msg)

    feedback_fn("Removing unneeded volumes on the primary node...")
    for idx, disk in enumerate(old_disks):
      meta = disk.children[1]
      self.cfg.SetDiskID(meta, pnode_uuid)
      msg = self.rpc.call_blockdev_remove(pnode_uuid, meta).fail_msg
      if msg:
        self.LogWarning("Could not remove metadata for disk %d on node %s,"
                        " continuing anyway: %s", idx,
                        self.cfg.GetNodeName(pnode_uuid), msg)

  def _CreateNewDisk(self, idx, params, _):
    """Creates a new disk.

    """
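    # For file-based templates the new disk is placed in the directory
    # of the instance's existing first disk; other templates need no
    # file path or driver.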
    # add a new disk
    if self.instance.disk_template in constants.DTS_FILEBASED:
      (file_driver, file_path) = self.instance.disks[0].logical_id
      file_path = os.path.dirname(file_path)
    else:
      file_driver = file_path = None

    disk = \
      GenerateDiskTemplate(self, self.instance.disk_template,
                           self.instance.uuid, self.instance.primary_node,
                           self.instance.secondary_nodes, [params], file_path,
                           file_driver, idx, self.Log, self.diskparams)[0]

    new_disks = CreateDisks(self, self.instance, disks=[disk])

    if self.cluster.prealloc_wipe_disks:
      # Wipe new disk
      WipeOrCleanupDisks(self, self.instance,
                         disks=[(idx, disk, 0)],
                         cleanup=new_disks)

    return (disk, [
      ("disk/%d" % idx, "add:size=%s,mode=%s" % (disk.size, disk.mode)),
      ])

  @staticmethod
  def _ModifyDisk(idx, disk, params, _):
    """Modifies a disk.

    """
    changes = []
    mode = params.get(constants.IDISK_MODE, None)
    if mode:
      disk.mode = mode
      changes.append(("disk.mode/%d" % idx, disk.mode))

    # only touch the name if it was given, to avoid clearing it when
    # merely the mode is changed
    if constants.IDISK_NAME in params:
      disk.name = params.get(constants.IDISK_NAME, None)
      changes.append(("disk.name/%d" % idx, disk.name))

    return changes

  def _RemoveDisk(self, idx, root, _):
    """Removes a disk.

    """
    (anno_disk,) = AnnotateDiskParams(self.instance, [root], self.cfg)
    for node_uuid, disk in anno_disk.ComputeNodeTree(
                             self.instance.primary_node):
      self.cfg.SetDiskID(disk, node_uuid)
      msg = self.rpc.call_blockdev_remove(node_uuid, disk).fail_msg
      if msg:
        self.LogWarning("Could not remove disk/%d on node '%s': %s,"
                        " continuing anyway", idx,
                        self.cfg.GetNodeName(node_uuid), msg)

    # if this is a DRBD disk, return its port to the pool
    if root.dev_type in constants.LDS_DRBD:
      self.cfg.AddTcpUdpPort(root.logical_id[2])

  def _CreateNewNic(self, idx, params, private):
    """Creates data structure for a new network interface.

    """
    mac = params[constants.INIC_MAC]
    ip = params.get(constants.INIC_IP, None)
    net = params.get(constants.INIC_NETWORK, None)
    name = params.get(constants.INIC_NAME, None)
    net_uuid = self.cfg.LookupNetwork(net)
    #TODO: not private.filled?? can a nic have no nicparams??
    nicparams = private.filled
    nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name,
                       nicparams=nicparams)
    nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())

    return (nobj, [
      ("nic.%d" % idx,
       "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
       (mac, ip, private.filled[constants.NIC_MODE],
       private.filled[constants.NIC_LINK],
       net)),
      ])

  def _ApplyNicMods(self, idx, nic, params, private):
    """Modifies a network interface.

    """
    changes = []

    for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:
      if key in params:
        changes.append(("nic.%s/%d" % (key, idx), params[key]))
        setattr(nic, key, params[key])

    new_net = params.get(constants.INIC_NETWORK, nic.network)
    new_net_uuid = self.cfg.LookupNetwork(new_net)
    if new_net_uuid != nic.network:
      changes.append(("nic.network/%d" % idx, new_net))
      nic.network = new_net_uuid
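
    # private.filled was computed by _PrepareNicModification during
    # CheckPrereq; apply it and report all resulting parameters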
    if private.filled:
      nic.nicparams = private.filled

      for (key, val) in nic.nicparams.items():
        changes.append(("nic.%s/%d" % (key, idx), val))

    return changes

  def Exec(self, feedback_fn):
    """Modifies an instance.

    All parameters take effect only at the next restart of the instance.

    """
    # Process here the warnings from CheckPrereq, as we don't have a
    # feedback_fn there.
    # TODO: Replace with self.LogWarning
    for warn in self.warn:
      feedback_fn("WARNING: %s" % warn)

    assert ((self.op.disk_template is None) ^
            bool(self.owned_locks(locking.LEVEL_NODE_RES))), \
      "Not owning any node resource locks"

    result = []

    # New primary node
    if self.op.pnode_uuid:
      self.instance.primary_node = self.op.pnode_uuid

    # runtime memory
    if self.op.runtime_mem:
      rpcres = self.rpc.call_instance_balloon_memory(self.instance.primary_node,
                                                     self.instance,
                                                     self.op.runtime_mem)
      rpcres.Raise("Cannot modify instance runtime memory")
      result.append(("runtime_memory", self.op.runtime_mem))

    # Apply disk changes
    _ApplyContainerMods("disk", self.instance.disks, result, self.diskmod,
                        self._CreateNewDisk, self._ModifyDisk,
                        self._RemoveDisk)
    _UpdateIvNames(0, self.instance.disks)
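
    # Disk template conversion: the instance's disks must be shut down
    # first; the conversion itself is dispatched through the
    # _DISK_CONVERSIONS table.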
    if self.op.disk_template:
      if __debug__:
        check_nodes = set(self.instance.all_nodes)
        if self.op.remote_node_uuid:
          check_nodes.add(self.op.remote_node_uuid)
        for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
          owned = self.owned_locks(level)
          assert not (check_nodes - owned), \
            ("Not owning the correct locks, owning %r, expected at least %r" %
             (owned, check_nodes))

      r_shut = ShutdownInstanceDisks(self, self.instance)
      if not r_shut:
        raise errors.OpExecError("Cannot shutdown instance disks, unable to"
                                 " proceed with disk template conversion")
      mode = (self.instance.disk_template, self.op.disk_template)
      try:
        self._DISK_CONVERSIONS[mode](self, feedback_fn)
      except:
        self.cfg.ReleaseDRBDMinors(self.instance.uuid)
        raise
      result.append(("disk_template", self.op.disk_template))

      assert self.instance.disk_template == self.op.disk_template, \
        ("Expected disk template '%s', found '%s'" %
         (self.op.disk_template, self.instance.disk_template))

    # Release node and resource locks if there are any (they might already have
    # been released during disk conversion)
    ReleaseLocks(self, locking.LEVEL_NODE)
    ReleaseLocks(self, locking.LEVEL_NODE_RES)

    # Apply NIC changes
    if self._new_nics is not None:
      self.instance.nics = self._new_nics
      result.extend(self._nic_chgdesc)

    # hvparams changes
    if self.op.hvparams:
      self.instance.hvparams = self.hv_inst
      for key, val in self.op.hvparams.iteritems():
        result.append(("hv/%s" % key, val))

    # beparams changes
    if self.op.beparams:
      self.instance.beparams = self.be_inst
      for key, val in self.op.beparams.iteritems():
        result.append(("be/%s" % key, val))

    # OS change
    if self.op.os_name:
      self.instance.os = self.op.os_name

    # osparams changes
    if self.op.osparams:
      self.instance.osparams = self.os_inst
      for key, val in self.op.osparams.iteritems():
        result.append(("os/%s" % key, val))

    if self.op.offline is None:
      # Ignore
      pass
    elif self.op.offline:
      # Mark instance as offline
      self.cfg.MarkInstanceOffline(self.instance.uuid)
      result.append(("admin_state", constants.ADMINST_OFFLINE))
    else:
      # Mark instance as online, but stopped
      self.cfg.MarkInstanceDown(self.instance.uuid)
      result.append(("admin_state", constants.ADMINST_DOWN))

    self.cfg.Update(self.instance, feedback_fn, self.proc.GetECId())

    assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
                self.owned_locks(locking.LEVEL_NODE)), \
      "All node locks should have been released by now"

    return result
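
  # Supported disk template conversions, keyed by (old, new) template;
  # _PreCheckDiskTemplate rejects any pair not listed here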
  _DISK_CONVERSIONS = {
    (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
    (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
    }


class LUInstanceChangeGroup(LogicalUnit):
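  """Moves an instance to another node group.

  """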
  HPATH = "instance-change-group"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self.share_locks = ShareAll()

    self.needed_locks = {
      locking.LEVEL_NODEGROUP: [],
      locking.LEVEL_NODE: [],
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
      }

    self._ExpandAndLockInstance()

    if self.op.target_groups:
      self.req_target_uuids = map(self.cfg.LookupNodeGroup,
                                  self.op.target_groups)
    else:
      self.req_target_uuids = None

    self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]

      if self.req_target_uuids:
        lock_groups = set(self.req_target_uuids)

        # Lock all groups used by the instance optimistically; this requires
        # going via the node before it's locked, requiring verification
        # later on
        instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
        lock_groups.update(instance_groups)
      else:
        # No target groups, need to lock all of them
        lock_groups = locking.ALL_SET

      self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups

    elif level == locking.LEVEL_NODE:
      if self.req_target_uuids:
        # Lock all nodes used by instances
        self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
        self._LockInstancesNodes()

        # Lock all nodes in all potential target groups
        lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
                       self.cfg.GetInstanceNodeGroups(self.op.instance_uuid))
        member_nodes = [node_uuid
                        for group in lock_groups
                        for node_uuid in self.cfg.GetNodeGroup(group).members]
        self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
      else:
        # Lock all nodes as all groups are potential targets
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    owned_instance_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))

    assert (self.req_target_uuids is None or
            owned_groups.issuperset(self.req_target_uuids))
    assert owned_instance_names == set([self.op.instance_name])

    # Get instance information
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)

    # Check if node groups for locked instance are still correct
    assert owned_nodes.issuperset(self.instance.all_nodes), \
      ("Instance %s's nodes changed while we kept the lock" %
       self.op.instance_name)

    inst_groups = CheckInstanceNodeGroups(self.cfg, self.op.instance_uuid,
                                          owned_groups)

    if self.req_target_uuids:
      # User requested specific target groups
      self.target_uuids = frozenset(self.req_target_uuids)
    else:
      # All groups except those used by the instance are potential targets
      self.target_uuids = owned_groups - inst_groups

    conflicting_groups = self.target_uuids & inst_groups
    if conflicting_groups:
      raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
                                 " used by the instance '%s'" %
                                 (utils.CommaJoin(conflicting_groups),
                                  self.op.instance_name),
                                 errors.ECODE_INVAL)

    if not self.target_uuids:
      raise errors.OpPrereqError("There are no possible target groups",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    assert self.target_uuids

    env = {
      "TARGET_GROUPS": " ".join(self.target_uuids),
      }

    env.update(BuildInstanceHookEnvByObject(self, self.instance))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()
    return ([mn], [mn])

  def Exec(self, feedback_fn):
    instances = list(self.owned_locks(locking.LEVEL_INSTANCE))

    assert instances == [self.op.instance_name], "Instance not locked"
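
    # The actual work (computing the evacuation jobs that move the
    # instance to one of the target groups) is delegated to the
    # iallocator; the resulting jobs are returned to the caller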
    req = iallocator.IAReqGroupChange(instances=instances,
                                      target_groups=list(self.target_uuids))
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute solution for changing group of"
                                 " instance '%s' using iallocator '%s': %s" %
                                 (self.op.instance_name, self.op.iallocator,
                                  ial.info), errors.ECODE_NORES)

    jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False)

    self.LogInfo("Iallocator returned %s job(s) for changing group of"
                 " instance '%s'", len(jobs), self.op.instance_name)

    return ResultWithJobs(jobs)