#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Logical units dealing with instances."""

import OpenSSL
import copy
import logging
import os

from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import ht
from ganeti import hypervisor
from ganeti import locking
from ganeti.masterd import iallocator
from ganeti import masterd
from ganeti import netutils
from ganeti import objects
from ganeti import pathutils
from ganeti import rpc
from ganeti import utils

from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs

from ganeti.cmdlib.common import INSTANCE_DOWN, \
  INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, CheckNodeOnline, \
  ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
  LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
  IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
  AnnotateDiskParams, GetUpdatedParams, ExpandInstanceUuidAndName, \
  ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeUuidAndName, \
  CheckDiskTemplateEnabled
from ganeti.cmdlib.instance_storage import CreateDisks, \
  CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, WaitForSync, \
  IsExclusiveStorageEnabledNodeUuid, CreateSingleBlockDev, ComputeDisks, \
  CheckRADOSFreeSpace, ComputeDiskSizePerVG, GenerateDiskTemplate, \
  StartInstanceDisks, ShutdownInstanceDisks, AssembleInstanceDisks, \
  CheckSpindlesExclusiveStorage
from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
  GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
  NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
  ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
  GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
  CheckInstanceBridgesExist, CheckNicsBridgesExist, CheckNodeHasOS

import ganeti.masterd.instance


#: Type description for changes as returned by L{_ApplyContainerMods}'s
#: callbacks
_TApplyContModsCbChanges = \
  ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
    ht.TNonEmptyString,
    ht.TAny,
    ])))


def _CheckHostnameSane(lu, name):
  """Ensures that a given hostname resolves to a 'sane' name.

  The given name is required to be a prefix of the resolved hostname,
  to prevent accidental mismatches.

  @param lu: the logical unit on behalf of which we're checking
  @param name: the name we should resolve and check
  @return: the resolved hostname object

  """
  hostname = netutils.GetHostname(name=name)
  if hostname.name != name:
    lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name)
  if not utils.MatchNameComponent(name, [hostname.name]):
    raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
                                " same as given hostname '%s'") %
                               (hostname.name, name), errors.ECODE_INVAL)
  return hostname


def _CheckOpportunisticLocking(op):
  """Generate error if opportunistic locking is not possible.

  """
  if op.opportunistic_locking and not op.iallocator:
    raise errors.OpPrereqError("Opportunistic locking is only available in"
                               " combination with an instance allocator",
                               errors.ECODE_INVAL)


def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_name_whitelist):
  """Wrapper around IAReqInstanceAlloc.

  @param op: The instance opcode
  @param disks: The computed disks
  @param nics: The computed nics
  @param beparams: The fully filled beparams
  @param node_name_whitelist: List of nodes which should appear as online to the
    allocator (unless the node is already marked offline)

  @returns: A filled L{iallocator.IAReqInstanceAlloc}

  """
  spindle_use = beparams[constants.BE_SPINDLE_USE]
  return iallocator.IAReqInstanceAlloc(name=op.instance_name,
                                       disk_template=op.disk_template,
                                       tags=op.tags,
                                       os=op.os_type,
                                       vcpus=beparams[constants.BE_VCPUS],
                                       memory=beparams[constants.BE_MAXMEM],
                                       spindle_use=spindle_use,
                                       disks=disks,
                                       nics=[n.ToDict() for n in nics],
                                       hypervisor=op.hypervisor,
                                       node_whitelist=node_name_whitelist)


def _ComputeFullBeParams(op, cluster):
  """Computes the full beparams.

  @param op: The instance opcode
  @param cluster: The cluster config object

  @return: The fully filled beparams

  """
  default_beparams = cluster.beparams[constants.PP_DEFAULT]
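  # Replace "auto" values with the cluster-wide defaults before the dict is
  # upgraded, type-checked and filled below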
  for param, value in op.beparams.iteritems():
    if value == constants.VALUE_AUTO:
      op.beparams[param] = default_beparams[param]
  objects.UpgradeBeParams(op.beparams)
  utils.ForceDictType(op.beparams, constants.BES_PARAMETER_TYPES)
  return cluster.SimpleFillBE(op.beparams)


def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
  """Computes the nics.

  @param op: The instance opcode
  @param cluster: Cluster configuration object
  @param default_ip: The default ip to assign
  @param cfg: An instance of the configuration object
  @param ec_id: Execution context ID

  @returns: The list of built NIC objects

  """
  nics = []
  for nic in op.nics:
    nic_mode_req = nic.get(constants.INIC_MODE, None)
    nic_mode = nic_mode_req
    if nic_mode is None or nic_mode == constants.VALUE_AUTO:
      nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]

    net = nic.get(constants.INIC_NETWORK, None)
    link = nic.get(constants.NIC_LINK, None)
    ip = nic.get(constants.INIC_IP, None)
    vlan = nic.get(constants.INIC_VLAN, None)

    if net is None or net.lower() == constants.VALUE_NONE:
      net = None
    else:
      if nic_mode_req is not None or link is not None:
        raise errors.OpPrereqError("If network is given, no mode or link"
                                   " is allowed to be passed",
                                   errors.ECODE_INVAL)

    if vlan is not None and nic_mode != constants.NIC_MODE_OVS:
      raise errors.OpPrereqError("VLAN is given, but network mode is not"
                                 " openvswitch", errors.ECODE_INVAL)

    # ip validity checks
    if ip is None or ip.lower() == constants.VALUE_NONE:
      nic_ip = None
    elif ip.lower() == constants.VALUE_AUTO:
      if not op.name_check:
        raise errors.OpPrereqError("IP address set to auto but name checks"
                                   " have been skipped",
                                   errors.ECODE_INVAL)
      nic_ip = default_ip
    else:
      # We defer pool operations until later, so that the iallocator has
      # filled in the instance's node(s)
      if ip.lower() == constants.NIC_IP_POOL:
        if net is None:
          raise errors.OpPrereqError("if ip=pool, parameter network"
                                     " must be passed too",
                                     errors.ECODE_INVAL)

      elif not netutils.IPAddress.IsValid(ip):
        raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                   errors.ECODE_INVAL)

      nic_ip = ip

    # TODO: check the ip address for uniqueness
    if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
      raise errors.OpPrereqError("Routed nic mode requires an ip address",
                                 errors.ECODE_INVAL)

    # MAC address verification
    mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
    if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
      mac = utils.NormalizeAndValidateMac(mac)

      try:
        # TODO: We need to factor this out
        cfg.ReserveMAC(mac, ec_id)
      except errors.ReservationError:
        raise errors.OpPrereqError("MAC address %s already in use"
                                   " in cluster" % mac,
                                   errors.ECODE_NOTUNIQUE)

    #  Build nic parameters
    nicparams = {}
    if nic_mode_req:
      nicparams[constants.NIC_MODE] = nic_mode
    if link:
      nicparams[constants.NIC_LINK] = link
    if vlan:
      nicparams[constants.NIC_VLAN] = vlan

    check_params = cluster.SimpleFillNIC(nicparams)
    objects.NIC.CheckParameterSyntax(check_params)
    net_uuid = cfg.LookupNetwork(net)
    name = nic.get(constants.INIC_NAME, None)
    if name is not None and name.lower() == constants.VALUE_NONE:
      name = None
    nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name,
                          network=net_uuid, nicparams=nicparams)
    nic_obj.uuid = cfg.GenerateUniqueID(ec_id)
    nics.append(nic_obj)

  return nics


def _CheckForConflictingIp(lu, ip, node_uuid):
  """In case of conflicting IP address raise error.

  @type ip: string
  @param ip: IP address
  @type node_uuid: string
  @param node_uuid: node UUID

  """
  (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node_uuid)
  if conf_net is not None:
    raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
                                " network %s, but the target NIC does not." %
                                (ip, conf_net)),
                               errors.ECODE_STATE)

  return (None, None)


def _ComputeIPolicyInstanceSpecViolation(
  ipolicy, instance_spec, disk_template,
  _compute_fn=ComputeIPolicySpecViolation):
  """Compute if instance specs meets the specs of ipolicy.

  @type ipolicy: dict
  @param ipolicy: The ipolicy to verify against
  @type instance_spec: dict
  @param instance_spec: The instance spec to verify
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{ComputeIPolicySpecViolation}

  """
  mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
  cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
  disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
  disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
  nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
  spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)

  return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
                     disk_sizes, spindle_use, disk_template)


def _CheckOSVariant(os_obj, name):
  """Check whether an OS name conforms to the os variants specification.

  @type os_obj: L{objects.OS}
  @param os_obj: OS object to check
  @type name: string
  @param name: OS name passed by the user, to check for validity

  """
  variant = objects.OS.GetVariant(name)
  if not os_obj.supported_variants:
    if variant:
      raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
                                 " passed)" % (os_obj.name, variant),
                                 errors.ECODE_INVAL)
    return
  if not variant:
    raise errors.OpPrereqError("OS name must include a variant",
                               errors.ECODE_INVAL)

  if variant not in os_obj.supported_variants:
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)


class LUInstanceCreate(LogicalUnit):
  """Create an instance.

  """
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def _CheckDiskTemplateValid(self):
    """Checks validity of disk template.

    """
    cluster = self.cfg.GetClusterInfo()
    if self.op.disk_template is None:
      # FIXME: It would be better to take the default disk template from the
      # ipolicy, but for the ipolicy we need the primary node, which we get from
      # the iallocator, which wants the disk template as input. To solve this
      # chicken-and-egg problem, it should be possible to specify just a node
      # group from the iallocator and take the ipolicy from that.
      self.op.disk_template = cluster.enabled_disk_templates[0]
    CheckDiskTemplateEnabled(cluster, self.op.disk_template)

  def _CheckDiskArguments(self):
    """Checks validity of disk-related arguments.

    """
    # check that disk's names are unique and valid
    utils.ValidateDeviceNames("disk", self.op.disks)

    self._CheckDiskTemplateValid()

    # check disks. parameter names and consistent adopt/no-adopt strategy
    has_adopt = has_no_adopt = False
    for disk in self.op.disks:
      if self.op.disk_template != constants.DT_EXT:
        utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
      if constants.IDISK_ADOPT in disk:
        has_adopt = True
      else:
        has_no_adopt = True
    if has_adopt and has_no_adopt:
      raise errors.OpPrereqError("Either all disks are adopted or none is",
                                 errors.ECODE_INVAL)
    if has_adopt:
      if self.op.disk_template not in constants.DTS_MAY_ADOPT:
        raise errors.OpPrereqError("Disk adoption is not supported for the"
                                   " '%s' disk template" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)
      if self.op.iallocator is not None:
        raise errors.OpPrereqError("Disk adoption not allowed with an"
                                   " iallocator script", errors.ECODE_INVAL)
      if self.op.mode == constants.INSTANCE_IMPORT:
        raise errors.OpPrereqError("Disk adoption not allowed for"
                                   " instance import", errors.ECODE_INVAL)
    else:
      if self.op.disk_template in constants.DTS_MUST_ADOPT:
        raise errors.OpPrereqError("Disk template %s requires disk adoption,"
                                   " but no 'adopt' parameter given" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)

    self.adopt_disks = has_adopt

  def _CheckVLANArguments(self):
    """ Check validity of VLANs if given

    """
    for nic in self.op.nics:
      vlan = nic.get(constants.INIC_VLAN, None)
      if vlan:
        if vlan[0] == ".":
          # vlan starting with dot means single untagged vlan,
          # might be followed by trunk (:)
          if not vlan[1:].isdigit():
            vlanlist = vlan[1:].split(':')
            for vl in vlanlist:
              if not vl.isdigit():
                raise errors.OpPrereqError("Specified VLAN parameter is "
                                           "invalid : %s" % vlan,
                                           errors.ECODE_INVAL)
        elif vlan[0] == ":":
          # Trunk - tagged only
          vlanlist = vlan[1:].split(':')
          for vl in vlanlist:
            if not vl.isdigit():
              raise errors.OpPrereqError("Specified VLAN parameter is invalid"
                                         " : %s" % vlan, errors.ECODE_INVAL)
        elif vlan.isdigit():
          # This is the simplest case. No dots, only single digit
          # -> Create untagged access port, dot needs to be added
          nic[constants.INIC_VLAN] = "." + vlan
        else:
          raise errors.OpPrereqError("Specified VLAN parameter is invalid"
                                     " : %s" % vlan, errors.ECODE_INVAL)

  def CheckArguments(self):
    """Check arguments.

    """
    # do not require name_check to ease forward/backward compatibility
    # for tools
    if self.op.no_install and self.op.start:
      self.LogInfo("No-installation mode selected, disabling startup")
      self.op.start = False
    # validate/normalize the instance name
    self.op.instance_name = \
      netutils.Hostname.GetNormalizedName(self.op.instance_name)

    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("Cannot do IP address check without a name"
                                 " check", errors.ECODE_INVAL)

    # check nics' parameter names
    for nic in self.op.nics:
      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
    # check that NIC's parameters names are unique and valid
    utils.ValidateDeviceNames("NIC", self.op.nics)

    self._CheckVLANArguments()

    self._CheckDiskArguments()

    # instance name verification
    if self.op.name_check:
      self.hostname = _CheckHostnameSane(self, self.op.instance_name)
      self.op.instance_name = self.hostname.name
      # used in CheckPrereq for ip ping check
      self.check_ip = self.hostname.ip
    else:
      self.check_ip = None

    # file storage checks
    if (self.op.file_driver and
        not self.op.file_driver in constants.FILE_DRIVER):
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                 self.op.file_driver, errors.ECODE_INVAL)

    ### Node/iallocator related checks
    CheckIAllocatorOrNode(self, "iallocator", "pnode")

    if self.op.pnode is not None:
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        if self.op.snode is None:
          raise errors.OpPrereqError("The networked disk templates need"
                                     " a mirror node", errors.ECODE_INVAL)
      elif self.op.snode:
        self.LogWarning("Secondary node will be ignored on non-mirrored disk"
                        " template")
        self.op.snode = None

    _CheckOpportunisticLocking(self.op)

    self._cds = GetClusterDomainSecret()

    if self.op.mode == constants.INSTANCE_IMPORT:
      # On import force_variant must be True, because if we forced it at
      # initial install, our only chance when importing it back is that it
      # works again!
      self.op.force_variant = True

      if self.op.no_install:
        self.LogInfo("No-installation mode has no effect during import")

    elif self.op.mode == constants.INSTANCE_CREATE:
      if self.op.os_type is None:
        raise errors.OpPrereqError("No guest OS specified",
                                   errors.ECODE_INVAL)
      if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
        raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
                                   " installation" % self.op.os_type,
                                   errors.ECODE_STATE)
      if self.op.disk_template is None:
        raise errors.OpPrereqError("No disk template specified",
                                   errors.ECODE_INVAL)

    elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
      # Check handshake to ensure both clusters have the same domain secret
      src_handshake = self.op.source_handshake
      if not src_handshake:
        raise errors.OpPrereqError("Missing source handshake",
                                   errors.ECODE_INVAL)

      errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
                                                           src_handshake)
      if errmsg:
        raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
                                   errors.ECODE_INVAL)

      # Load and check source CA
      self.source_x509_ca_pem = self.op.source_x509_ca
      if not self.source_x509_ca_pem:
        raise errors.OpPrereqError("Missing source X509 CA",
                                   errors.ECODE_INVAL)

      try:
        (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
                                                    self._cds)
      except OpenSSL.crypto.Error, err:
        raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
                                   (err, ), errors.ECODE_INVAL)

      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
      if errcode is not None:
        raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
                                   errors.ECODE_INVAL)

      self.source_x509_ca = cert

      src_instance_name = self.op.source_instance_name
      if not src_instance_name:
        raise errors.OpPrereqError("Missing source instance name",
                                   errors.ECODE_INVAL)

      self.source_instance_name = \
        netutils.GetHostname(name=src_instance_name).name

    else:
      raise errors.OpPrereqError("Invalid instance creation mode %r" %
                                 self.op.mode, errors.ECODE_INVAL)

  def ExpandNames(self):
    """ExpandNames for CreateInstance.

    Figure out the right locks for instance creation.

    """
    self.needed_locks = {}

    # this is just a preventive check, but someone might still add this
    # instance in the meantime, and creation will fail at lock-add time
    if self.op.instance_name in\
      [inst.name for inst in self.cfg.GetAllInstancesInfo().values()]:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 self.op.instance_name, errors.ECODE_EXISTS)

    self.add_locks[locking.LEVEL_INSTANCE] = self.op.instance_name

    if self.op.iallocator:
      # TODO: Find a solution to not lock all nodes in the cluster, e.g. by
      # specifying a group on instance creation and then selecting nodes from
      # that group
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
      self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET

      if self.op.opportunistic_locking:
        self.opportunistic_locks[locking.LEVEL_NODE] = True
        self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
    else:
      (self.op.pnode_uuid, self.op.pnode) = \
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
      nodelist = [self.op.pnode_uuid]
      if self.op.snode is not None:
        (self.op.snode_uuid, self.op.snode) = \
          ExpandNodeUuidAndName(self.cfg, self.op.snode_uuid, self.op.snode)
        nodelist.append(self.op.snode_uuid)
      self.needed_locks[locking.LEVEL_NODE] = nodelist

    # in case of import lock the source node too
    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = self.op.src_node
      src_path = self.op.src_path

      if src_path is None:
        self.op.src_path = src_path = self.op.instance_name

      if src_node is None:
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
        self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
        self.op.src_node = None
        if os.path.isabs(src_path):
          raise errors.OpPrereqError("Importing an instance from a path"
                                     " requires a source node option",
                                     errors.ECODE_INVAL)
      else:
        (self.op.src_node_uuid, self.op.src_node) = (_, src_node) = \
          ExpandNodeUuidAndName(self.cfg, self.op.src_node_uuid, src_node)
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
          self.needed_locks[locking.LEVEL_NODE].append(self.op.src_node_uuid)
        if not os.path.isabs(src_path):
          self.op.src_path = src_path = \
            utils.PathJoin(pathutils.EXPORT_DIR, src_path)

    self.needed_locks[locking.LEVEL_NODE_RES] = \
      CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def _RunAllocator(self):
    """Run the allocator based on input opcode.

    """
    if self.op.opportunistic_locking:
      # Only consider nodes for which a lock is held
      node_name_whitelist = self.cfg.GetNodeNames(
        self.owned_locks(locking.LEVEL_NODE))
    else:
      node_name_whitelist = None

    #TODO Export network to iallocator so that it chooses a pnode
    #     in a nodegroup that has the desired network connected to
    req = _CreateInstanceAllocRequest(self.op, self.disks,
                                      self.nics, self.be_full,
                                      node_name_whitelist)
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      # When opportunistic locks are used only a temporary failure is generated
      if self.op.opportunistic_locking:
        ecode = errors.ECODE_TEMP_NORES
      else:
        ecode = errors.ECODE_NORES

      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" %
                                 (self.op.iallocator, ial.info),
                                 ecode)

    (self.op.pnode_uuid, self.op.pnode) = \
      ExpandNodeUuidAndName(self.cfg, None, ial.result[0])
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                 self.op.instance_name, self.op.iallocator,
                 utils.CommaJoin(ial.result))

    assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator"

    if req.RequiredNodes() == 2:
      (self.op.snode_uuid, self.op.snode) = \
        ExpandNodeUuidAndName(self.cfg, None, ial.result[1])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "ADD_MODE": self.op.mode,
      }
    if self.op.mode == constants.INSTANCE_IMPORT:
      env["SRC_NODE"] = self.op.src_node
      env["SRC_PATH"] = self.op.src_path
      env["SRC_IMAGES"] = self.src_images

    env.update(BuildInstanceHookEnv(
      name=self.op.instance_name,
      primary_node_name=self.op.pnode,
      secondary_node_names=self.cfg.GetNodeNames(self.secondaries),
      status=self.op.start,
      os_type=self.op.os_type,
      minmem=self.be_full[constants.BE_MINMEM],
      maxmem=self.be_full[constants.BE_MAXMEM],
      vcpus=self.be_full[constants.BE_VCPUS],
      nics=NICListToTuple(self, self.nics),
      disk_template=self.op.disk_template,
      disks=[(d[constants.IDISK_NAME], d.get("uuid", ""),
              d[constants.IDISK_SIZE], d[constants.IDISK_MODE])
             for d in self.disks],
      bep=self.be_full,
      hvp=self.hv_full,
      hypervisor_name=self.op.hypervisor,
      tags=self.op.tags,
      ))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode(), self.op.pnode_uuid] + self.secondaries
    return nl, nl

  def _ReadExportInfo(self):
    """Reads the export information from disk.

    It will override the opcode source node and path with the actual
    information, if these two were not specified before.

    @return: the export information

    """
    assert self.op.mode == constants.INSTANCE_IMPORT

    if self.op.src_node_uuid is None:
      locked_nodes = self.owned_locks(locking.LEVEL_NODE)
      exp_list = self.rpc.call_export_list(locked_nodes)
      found = False
      for node in exp_list:
        if exp_list[node].fail_msg:
          continue
        if self.op.src_path in exp_list[node].payload:
          found = True
          self.op.src_node = node
          self.op.src_node_uuid = self.cfg.GetNodeInfoByName(node).uuid
          self.op.src_path = utils.PathJoin(pathutils.EXPORT_DIR,
                                            self.op.src_path)
          break
      if not found:
        raise errors.OpPrereqError("No export found for relative path %s" %
                                   self.op.src_path, errors.ECODE_INVAL)

    CheckNodeOnline(self, self.op.src_node_uuid)
    result = self.rpc.call_export_info(self.op.src_node_uuid, self.op.src_path)
    result.Raise("No export or invalid export found in dir %s" %
                 self.op.src_path)

    export_info = objects.SerializableConfigParser.Loads(str(result.payload))
    if not export_info.has_section(constants.INISECT_EXP):
      raise errors.ProgrammerError("Corrupted export config",
                                   errors.ECODE_ENVIRON)

    ei_version = export_info.get(constants.INISECT_EXP, "version")
    if int(ei_version) != constants.EXPORT_VERSION:
      raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                 (ei_version, constants.EXPORT_VERSION),
                                 errors.ECODE_ENVIRON)
    return export_info

  def _ReadExportParams(self, einfo):
    """Use export parameters as defaults.

    In case the opcode doesn't specify (as in override) some instance
    parameters, then try to use them from the export information, if
    that declares them.

    """
    self.op.os_type = einfo.get(constants.INISECT_EXP, "os")

    if not self.op.disks:
      disks = []
      # TODO: import the disk iv_name too
      for idx in range(constants.MAX_DISKS):
        if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
          disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
          disks.append({constants.IDISK_SIZE: disk_sz})
      self.op.disks = disks
      if not disks and self.op.disk_template != constants.DT_DISKLESS:
        raise errors.OpPrereqError("No disk info specified and the export"
                                   " is missing the disk information",
                                   errors.ECODE_INVAL)

    if not self.op.nics:
      nics = []
      for idx in range(constants.MAX_NICS):
        if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
          ndict = {}
          for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
            v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
            ndict[name] = v
          nics.append(ndict)
        else:
          break
      self.op.nics = nics

    if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
      self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()

    if (self.op.hypervisor is None and
        einfo.has_option(constants.INISECT_INS, "hypervisor")):
      self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")

    if einfo.has_section(constants.INISECT_HYP):
      # use the export parameters but do not override the ones
      # specified by the user
      for name, value in einfo.items(constants.INISECT_HYP):
        if name not in self.op.hvparams:
          self.op.hvparams[name] = value

    if einfo.has_section(constants.INISECT_BEP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_BEP):
        if name not in self.op.beparams:
          self.op.beparams[name] = value
        # Compatibility for the old "memory" be param
        if name == constants.BE_MEMORY:
          if constants.BE_MAXMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MAXMEM] = value
          if constants.BE_MINMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MINMEM] = value
    else:
      # try to read the parameters old style, from the main section
      for name in constants.BES_PARAMETERS:
        if (name not in self.op.beparams and
            einfo.has_option(constants.INISECT_INS, name)):
          self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)

    if einfo.has_section(constants.INISECT_OSP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_OSP):
        if name not in self.op.osparams:
          self.op.osparams[name] = value

  def _RevertToDefaults(self, cluster):
    """Revert the instance parameters to the default values.

    """
    # hvparams
    hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
    for name in self.op.hvparams.keys():
      if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
        del self.op.hvparams[name]
    # beparams
    be_defs = cluster.SimpleFillBE({})
    for name in self.op.beparams.keys():
      if name in be_defs and be_defs[name] == self.op.beparams[name]:
        del self.op.beparams[name]
    # nic params
    nic_defs = cluster.SimpleFillNIC({})
    for nic in self.op.nics:
      for name in constants.NICS_PARAMETERS:
        if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
          del nic[name]
    # osparams
    os_defs = cluster.SimpleFillOS(self.op.os_type, {})
    for name in self.op.osparams.keys():
      if name in os_defs and os_defs[name] == self.op.osparams[name]:
        del self.op.osparams[name]

  def _CalculateFileStorageDir(self):
    """Calculate final instance file storage dir.

    """
    # file storage dir calculation/check
    self.instance_file_storage_dir = None
    if self.op.disk_template in constants.DTS_FILEBASED:
      # build the full file storage dir path
      joinargs = []

      if self.op.disk_template == constants.DT_SHARED_FILE:
        get_fsd_fn = self.cfg.GetSharedFileStorageDir
      else:
        get_fsd_fn = self.cfg.GetFileStorageDir

      cfg_storagedir = get_fsd_fn()
      if not cfg_storagedir:
        raise errors.OpPrereqError("Cluster file storage dir not defined",
                                   errors.ECODE_STATE)
      joinargs.append(cfg_storagedir)

      if self.op.file_storage_dir is not None:
        joinargs.append(self.op.file_storage_dir)

      joinargs.append(self.op.instance_name)

      # pylint: disable=W0142
      self.instance_file_storage_dir = utils.PathJoin(*joinargs)

  def CheckPrereq(self): # pylint: disable=R0914
    """Check prerequisites.

    """
    self._CalculateFileStorageDir()

    if self.op.mode == constants.INSTANCE_IMPORT:
      export_info = self._ReadExportInfo()
      self._ReadExportParams(export_info)
      self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
    else:
      self._old_instance_name = None

    if (not self.cfg.GetVGName() and
        self.op.disk_template not in constants.DTS_NOT_LVM):
      raise errors.OpPrereqError("Cluster does not support lvm-based"
                                 " instances", errors.ECODE_STATE)

    if (self.op.hypervisor is None or
        self.op.hypervisor == constants.VALUE_AUTO):
      self.op.hypervisor = self.cfg.GetHypervisorType()

    cluster = self.cfg.GetClusterInfo()
    enabled_hvs = cluster.enabled_hypervisors
    if self.op.hypervisor not in enabled_hvs:
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
                                 " cluster (%s)" %
                                 (self.op.hypervisor, ",".join(enabled_hvs)),
                                 errors.ECODE_STATE)

    # Check tag validity
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)

    # check hypervisor parameter syntax (locally)
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
    filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
                                      self.op.hvparams)
    hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
    hv_type.CheckParameterSyntax(filled_hvp)
    self.hv_full = filled_hvp
    # check that we don't specify global parameters on an instance
    CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
                         "instance", "cluster")

    # fill and remember the beparams dict
    self.be_full = _ComputeFullBeParams(self.op, cluster)

    # build os parameters
    self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)

    # now that hvp/bep are in final format, let's reset to defaults,
    # if told to do so
    if self.op.identify_defaults:
      self._RevertToDefaults(cluster)

    # NIC buildup
    self.nics = _ComputeNics(self.op, cluster, self.check_ip, self.cfg,
                             self.proc.GetECId())

    # disk checks/pre-build
    default_vg = self.cfg.GetVGName()
    self.disks = ComputeDisks(self.op, default_vg)

    if self.op.mode == constants.INSTANCE_IMPORT:
      disk_images = []
      for idx in range(len(self.disks)):
        option = "disk%d_dump" % idx
        if export_info.has_option(constants.INISECT_INS, option):
          # FIXME: are the old os-es, disk sizes, etc. useful?
          export_name = export_info.get(constants.INISECT_INS, option)
          image = utils.PathJoin(self.op.src_path, export_name)
          disk_images.append(image)
        else:
          disk_images.append(False)

      self.src_images = disk_images

      if self.op.instance_name == self._old_instance_name:
        for idx, nic in enumerate(self.nics):
          if nic.mac == constants.VALUE_AUTO:
            nic_mac_ini = "nic%d_mac" % idx
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)

    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT

    # ip ping checks (we use the same ip that was resolved in ExpandNames)
    if self.op.ip_check:
      if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (self.check_ip, self.op.instance_name),
                                   errors.ECODE_NOTUNIQUE)

    #### mac address generation
    # By generating here the mac address both the allocator and the hooks get
    # the real final mac address rather than the 'auto' or 'generate' value.
    # There is a race condition between the generation and the instance object
    # creation, which means that we know the mac is valid now, but we're not
    # sure it will be when we actually add the instance. If things go bad
    # adding the instance will abort because of a duplicate mac, and the
    # creation job will fail.
    for nic in self.nics:
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId())

    #### allocator run

    if self.op.iallocator is not None:
      self._RunAllocator()

    # Release all unneeded node locks
    keep_locks = filter(None, [self.op.pnode_uuid, self.op.snode_uuid,
                               self.op.src_node_uuid])
    ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES)), \
      "Node locks differ from node resource locks"

    #### node related checks

    # check primary node
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode_uuid)
    assert self.pnode is not None, \
      "Cannot retrieve locked node %s" % self.op.pnode_uuid
    if pnode.offline:
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if pnode.drained:
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if not pnode.vm_capable:
      raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
                                 " '%s'" % pnode.name, errors.ECODE_STATE)

    self.secondaries = []

    # Fill in any IPs from IP pools. This must happen here, because we need to
    # know the nic's primary node, as specified by the iallocator
    for idx, nic in enumerate(self.nics):
      net_uuid = nic.network
      if net_uuid is not None:
        nobj = self.cfg.GetNetwork(net_uuid)
        netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.uuid)
        if netparams is None:
          raise errors.OpPrereqError("No netparams found for network"
                                     " %s. Propably not connected to"
                                     " node's %s nodegroup" %
                                     (nobj.name, self.pnode.name),
                                     errors.ECODE_INVAL)
        self.LogInfo("NIC/%d inherits netparams %s" %
                     (idx, netparams.values()))
        nic.nicparams = dict(netparams)
        if nic.ip is not None:
          if nic.ip.lower() == constants.NIC_IP_POOL:
            try:
              nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
                                         " from the address pool" % idx,
                                         errors.ECODE_STATE)
            self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name)
          else:
            try:
              self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("IP address %s already in use"
                                         " or does not belong to network %s" %
                                         (nic.ip, nobj.name),
                                         errors.ECODE_NOTUNIQUE)

      # net is None, ip None or given
      elif self.op.conflicts_check:
        _CheckForConflictingIp(self, nic.ip, self.pnode.uuid)

    # mirror node verification
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.snode_uuid == pnode.uuid:
        raise errors.OpPrereqError("The secondary node cannot be the"
                                   " primary node", errors.ECODE_INVAL)
      CheckNodeOnline(self, self.op.snode_uuid)
      CheckNodeNotDrained(self, self.op.snode_uuid)
      CheckNodeVmCapable(self, self.op.snode_uuid)
      self.secondaries.append(self.op.snode_uuid)

      snode = self.cfg.GetNodeInfo(self.op.snode_uuid)
      if pnode.group != snode.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    nodes = [pnode]
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      nodes.append(snode)
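    # If any of the instance's nodes has exclusive storage enabled, the
    # chosen disk template must support it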
    has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
    excl_stor = compat.any(map(has_es, nodes))
    if excl_stor and not self.op.disk_template in constants.DTS_EXCL_STORAGE:
      raise errors.OpPrereqError("Disk template %s not supported with"
                                 " exclusive storage" % self.op.disk_template,
                                 errors.ECODE_STATE)
    for disk in self.disks:
      CheckSpindlesExclusiveStorage(disk, excl_stor, True)

    node_uuids = [pnode.uuid] + self.secondaries

    if not self.adopt_disks:
      if self.op.disk_template == constants.DT_RBD:
        # _CheckRADOSFreeSpace() is just a placeholder.
        # Any function that checks prerequisites can be placed here.
        # Check if there is enough space on the RADOS cluster.
        CheckRADOSFreeSpace()
      elif self.op.disk_template == constants.DT_EXT:
        # FIXME: Function that checks prereqs if needed
        pass
      elif self.op.disk_template in utils.GetLvmDiskTemplates():
        # Check lv size requirements, if not adopting
        req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
        CheckNodesFreeDiskPerVG(self, node_uuids, req_sizes)
      else:
        # FIXME: add checks for other, non-adopting, non-lvm disk templates
        pass

    elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
      all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
                                disk[constants.IDISK_ADOPT])
                     for disk in self.disks])
      if len(all_lvs) != len(self.disks):
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
                                   errors.ECODE_INVAL)
      for lv_name in all_lvs:
        try:
          # FIXME: lv_name here is "vg/lv" need to ensure that other calls
          # to ReserveLV uses the same syntax
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("LV named %s used by another instance" %
                                     lv_name, errors.ECODE_NOTUNIQUE)

      vg_names = self.rpc.call_vg_list([pnode.uuid])[pnode.uuid]
      vg_names.Raise("Cannot get VG information from node %s" % pnode.name)

      node_lvs = self.rpc.call_lv_list([pnode.uuid],
                                       vg_names.payload.keys())[pnode.uuid]
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
      node_lvs = node_lvs.payload

      delta = all_lvs.difference(node_lvs.keys())
      if delta:
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
      if online_lvs:
        raise errors.OpPrereqError("Online logical volumes found, cannot"
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
                                   errors.ECODE_STATE)
      # update the size of disk based on what is found
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
                                        dsk[constants.IDISK_ADOPT])][0]))

    elif self.op.disk_template == constants.DT_BLOCK:
      # Normalize and de-duplicate device paths
      all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
                       for disk in self.disks])
      if len(all_disks) != len(self.disks):
        raise errors.OpPrereqError("Duplicate disk names given for adoption",
                                   errors.ECODE_INVAL)
      baddisks = [d for d in all_disks
                  if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
      if baddisks:
        raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
                                   " cannot be adopted" %
                                   (utils.CommaJoin(baddisks),
                                    constants.ADOPTABLE_BLOCKDEV_ROOT),
                                   errors.ECODE_INVAL)

      node_disks = self.rpc.call_bdev_sizes([pnode.uuid],
                                            list(all_disks))[pnode.uuid]
      node_disks.Raise("Cannot get block device information from node %s" %
                       pnode.name)
      node_disks = node_disks.payload
      delta = all_disks.difference(node_disks.keys())
      if delta:
        raise errors.OpPrereqError("Missing block device(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_disks[dsk[constants.IDISK_ADOPT]]))

    # Verify instance specs
    spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
    ispec = {
      constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
      constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
      constants.ISPEC_DISK_COUNT: len(self.disks),
      constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE]
                                  for disk in self.disks],
      constants.ISPEC_NIC_COUNT: len(self.nics),
      constants.ISPEC_SPINDLE_USE: spindle_use,
      }

    group_info = self.cfg.GetNodeGroup(pnode.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
    res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
                                               self.op.disk_template)
    if not self.op.ignore_ipolicy and res:
      msg = ("Instance allocation to group %s (%s) violates policy: %s" %
             (pnode.group, group_info.name, utils.CommaJoin(res)))
      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

    CheckHVParams(self, node_uuids, self.op.hypervisor, self.op.hvparams)

    CheckNodeHasOS(self, pnode.uuid, self.op.os_type, self.op.force_variant)
    # check OS parameters (remotely)
    CheckOSParams(self, True, node_uuids, self.op.os_type, self.os_full)

    CheckNicsBridgesExist(self, self.nics, self.pnode.uuid)

    #TODO: _CheckExtParams (remotely)
    # Check parameters for extstorage

    # memory check on primary node
    #TODO(dynmem): use MINMEM for checking
    if self.op.start:
      hvfull = objects.FillDict(cluster.hvparams.get(self.op.hypervisor, {}),
                                self.op.hvparams)
      CheckNodeFreeMemory(self, self.pnode.uuid,
                          "creating instance %s" % self.op.instance_name,
                          self.be_full[constants.BE_MAXMEM],
                          self.op.hypervisor, hvfull)

    self.dry_run_result = list(node_uuids)

  def Exec(self, feedback_fn):
    """Create and add the instance to the cluster.

    """
    assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
                self.owned_locks(locking.LEVEL_NODE)), \
      "Node locks differ from node resource locks"
    assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)

    ht_kind = self.op.hypervisor
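    # Hypervisors in HTS_REQ_PORT get a TCP port allocated from the cluster
    # pool (e.g. for console access); the others need no port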
    if ht_kind in constants.HTS_REQ_PORT:
      network_port = self.cfg.AllocatePort()
    else:
      network_port = None

    instance_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())

    # This is ugly but we got a chicken-egg problem here
    # We can only take the group disk parameters, as the instance
    # has no disks yet (we are generating them right here).
    nodegroup = self.cfg.GetNodeGroup(self.pnode.group)
    disks = GenerateDiskTemplate(self,
                                 self.op.disk_template,
                                 instance_uuid, self.pnode.uuid,
                                 self.secondaries,
                                 self.disks,
                                 self.instance_file_storage_dir,
                                 self.op.file_driver,
                                 0,
                                 feedback_fn,
                                 self.cfg.GetGroupDiskParams(nodegroup))

    iobj = objects.Instance(name=self.op.instance_name,
                            uuid=instance_uuid,
                            os=self.op.os_type,
                            primary_node=self.pnode.uuid,
                            nics=self.nics, disks=disks,
                            disk_template=self.op.disk_template,
                            disks_active=False,
                            admin_state=constants.ADMINST_DOWN,
                            network_port=network_port,
                            beparams=self.op.beparams,
                            hvparams=self.op.hvparams,
                            hypervisor=self.op.hypervisor,
                            osparams=self.op.osparams,
                            )

    if self.op.tags:
      for tag in self.op.tags:
        iobj.AddTag(tag)

    if self.adopt_disks:
      if self.op.disk_template == constants.DT_PLAIN:
        # rename LVs to the newly-generated names; we need to construct
        # 'fake' LV disks with the old data, plus the new unique_id
        tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
        rename_to = []
        for t_dsk, a_dsk in zip(tmp_disks, self.disks):
          rename_to.append(t_dsk.logical_id)
          t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
          self.cfg.SetDiskID(t_dsk, self.pnode.uuid)
        result = self.rpc.call_blockdev_rename(self.pnode.uuid,
                                               zip(tmp_disks, rename_to))
        result.Raise("Failed to rename adoped LVs")
1276
    else:
1277
      feedback_fn("* creating instance disks...")
1278
      try:
1279
        CreateDisks(self, iobj)
1280
      except errors.OpExecError:
1281
        self.LogWarning("Device creation failed")
1282
        self.cfg.ReleaseDRBDMinors(self.op.instance_name)
1283
        raise
1284

    
1285
    feedback_fn("adding instance %s to cluster config" % self.op.instance_name)
1286

    
1287
    self.cfg.AddInstance(iobj, self.proc.GetECId())
1288

    
1289
    # Declare that we don't want to remove the instance lock anymore, as we've
1290
    # added the instance to the config
1291
    del self.remove_locks[locking.LEVEL_INSTANCE]
1292

    
1293
    if self.op.mode == constants.INSTANCE_IMPORT:
1294
      # Release unused nodes
1295
      ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node_uuid])
1296
    else:
1297
      # Release all nodes
1298
      ReleaseLocks(self, locking.LEVEL_NODE)
1299

    
1300
    disk_abort = False
1301
    if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
1302
      feedback_fn("* wiping instance disks...")
1303
      try:
1304
        WipeDisks(self, iobj)
1305
      except errors.OpExecError, err:
1306
        logging.exception("Wiping disks failed")
1307
        self.LogWarning("Wiping instance disks failed (%s)", err)
1308
        disk_abort = True
1309

    
1310
    if disk_abort:
1311
      # Something is already wrong with the disks, don't do anything else
1312
      pass
1313
    elif self.op.wait_for_sync:
1314
      disk_abort = not WaitForSync(self, iobj)
1315
    elif iobj.disk_template in constants.DTS_INT_MIRROR:
1316
      # make sure the disks are not degraded (still sync-ing is ok)
1317
      feedback_fn("* checking mirrors status")
1318
      disk_abort = not WaitForSync(self, iobj, oneshot=True)
1319
    else:
1320
      disk_abort = False
1321

    
1322
    if disk_abort:
1323
      RemoveDisks(self, iobj)
1324
      self.cfg.RemoveInstance(iobj.uuid)
1325
      # Make sure the instance lock gets removed
1326
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
1327
      raise errors.OpExecError("There are some degraded disks for"
1328
                               " this instance")
1329

    
1330
    # instance disks are now active
1331
    iobj.disks_active = True
1332

    
1333
    # Release all node resource locks
1334
    ReleaseLocks(self, locking.LEVEL_NODE_RES)
1335

    
1336
    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
1337
      # we need to set the disks ID to the primary node, since the
1338
      # preceding code might or might have not done it, depending on
1339
      # disk template and other options
1340
      for disk in iobj.disks:
1341
        self.cfg.SetDiskID(disk, self.pnode.uuid)
1342
      if self.op.mode == constants.INSTANCE_CREATE:
1343
        if not self.op.no_install:
1344
          pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
1345
                        not self.op.wait_for_sync)
1346
          if pause_sync:
1347
            feedback_fn("* pausing disk sync to install instance OS")
1348
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
1349
                                                              (iobj.disks,
1350
                                                               iobj), True)
1351
            for idx, success in enumerate(result.payload):
1352
              if not success:
1353
                logging.warn("pause-sync of instance %s for disk %d failed",
1354
                             self.op.instance_name, idx)
1355

    
1356
          feedback_fn("* running the instance OS create scripts...")
1357
          # FIXME: pass debug option from opcode to backend
1358
          os_add_result = \
1359
            self.rpc.call_instance_os_add(self.pnode.uuid, (iobj, None), False,
1360
                                          self.op.debug_level)
1361
          if pause_sync:
1362
            feedback_fn("* resuming disk sync")
1363
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
1364
                                                              (iobj.disks,
1365
                                                               iobj), False)
1366
            for idx, success in enumerate(result.payload):
1367
              if not success:
1368
                logging.warn("resume-sync of instance %s for disk %d failed",
1369
                             self.op.instance_name, idx)
1370

    
1371
          os_add_result.Raise("Could not add os for instance %s"
1372
                              " on node %s" % (self.op.instance_name,
1373
                                               self.pnode.name))
1374

    
1375
      else:
1376
        if self.op.mode == constants.INSTANCE_IMPORT:
1377
          feedback_fn("* running the instance OS import scripts...")
1378

    
1379
          transfers = []
1380

    
1381
          for idx, image in enumerate(self.src_images):
1382
            if not image:
1383
              continue
1384

    
1385
            # FIXME: pass debug option from opcode to backend
1386
            dt = masterd.instance.DiskTransfer("disk/%s" % idx,
1387
                                               constants.IEIO_FILE, (image, ),
1388
                                               constants.IEIO_SCRIPT,
1389
                                               (iobj.disks[idx], idx),
1390
                                               None)
1391
            transfers.append(dt)
1392

    
1393
          import_result = \
1394
            masterd.instance.TransferInstanceData(self, feedback_fn,
1395
                                                  self.op.src_node_uuid,
1396
                                                  self.pnode.uuid,
1397
                                                  self.pnode.secondary_ip,
1398
                                                  iobj, transfers)
1399
          if not compat.all(import_result):
1400
            self.LogWarning("Some disks for instance %s on node %s were not"
1401
                            " imported successfully" % (self.op.instance_name,
1402
                                                        self.pnode.name))
1403

    
1404
          rename_from = self._old_instance_name
1405

    
1406
        elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
1407
          feedback_fn("* preparing remote import...")
1408
          # The source cluster will stop the instance before attempting to make
1409
          # a connection. In some cases stopping an instance can take a long
1410
          # time, hence the shutdown timeout is added to the connection
1411
          # timeout.
1412
          connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
1413
                             self.op.source_shutdown_timeout)
1414
          timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
1415

    
1416
          assert iobj.primary_node == self.pnode.uuid
1417
          disk_results = \
1418
            masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
1419
                                          self.source_x509_ca,
1420
                                          self._cds, timeouts)
1421
          if not compat.all(disk_results):
1422
            # TODO: Should the instance still be started, even if some disks
1423
            # failed to import (valid for local imports, too)?
1424
            self.LogWarning("Some disks for instance %s on node %s were not"
1425
                            " imported successfully" % (self.op.instance_name,
1426
                                                        self.pnode.name))
1427

    
1428
          rename_from = self.source_instance_name
1429

    
1430
        else:
1431
          # also checked in the prereq part
1432
          raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
1433
                                       % self.op.mode)
1434

    
1435
        # Run rename script on newly imported instance
1436
        assert iobj.name == self.op.instance_name
1437
        feedback_fn("Running rename script for %s" % self.op.instance_name)
1438
        result = self.rpc.call_instance_run_rename(self.pnode.uuid, iobj,
1439
                                                   rename_from,
1440
                                                   self.op.debug_level)
1441
        result.Warn("Failed to run rename script for %s on node %s" %
1442
                    (self.op.instance_name, self.pnode.name), self.LogWarning)
1443

    
1444
    assert not self.owned_locks(locking.LEVEL_NODE_RES)
1445

    
1446
    if self.op.start:
1447
      iobj.admin_state = constants.ADMINST_UP
1448
      self.cfg.Update(iobj, feedback_fn)
1449
      logging.info("Starting instance %s on node %s", self.op.instance_name,
1450
                   self.pnode.name)
1451
      feedback_fn("* starting instance...")
1452
      result = self.rpc.call_instance_start(self.pnode.uuid, (iobj, None, None),
1453
                                            False, self.op.reason)
1454
      result.Raise("Could not start instance")
1455

    
1456
    return list(iobj.all_nodes)
1457

    
1458

    
1459
class LUInstanceRename(LogicalUnit):
  """Rename an instance.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE

  def CheckArguments(self):
    """Check arguments.

    """
    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("IP address check requires a name check",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = BuildInstanceHookEnvByObject(self, self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    (self.op.instance_uuid, self.op.instance_name) = \
      ExpandInstanceUuidAndName(self.cfg, self.op.instance_uuid,
                                self.op.instance_name)
    instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    assert instance is not None

    # It should actually not happen that an instance is running with a disabled
    # disk template, but in case it does, the renaming of file-based instances
    # will fail horribly. Thus, we test it before.
    if (instance.disk_template in constants.DTS_FILEBASED and
        self.op.new_name != instance.name):
      CheckDiskTemplateEnabled(self.cfg.GetClusterInfo(),
                               instance.disk_template)

    CheckNodeOnline(self, instance.primary_node)
    CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
                       msg="cannot rename")
    self.instance = instance

    new_name = self.op.new_name
    if self.op.name_check:
      hostname = _CheckHostnameSane(self, new_name)
      new_name = self.op.new_name = hostname.name
      if (self.op.ip_check and
          netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (hostname.ip, new_name),
                                   errors.ECODE_NOTUNIQUE)

    instance_names = [inst.name for
                      inst in self.cfg.GetAllInstancesInfo().values()]
    if new_name in instance_names and new_name != instance.name:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 new_name, errors.ECODE_EXISTS)

  def Exec(self, feedback_fn):
    """Rename the instance.

    """
    old_name = self.instance.name

    rename_file_storage = False
    if (self.instance.disk_template in constants.DTS_FILEBASED and
        self.op.new_name != self.instance.name):
      old_file_storage_dir = os.path.dirname(
                               self.instance.disks[0].logical_id[1])
      rename_file_storage = True

    self.cfg.RenameInstance(self.instance.uuid, self.op.new_name)
    # Change the instance lock. This is definitely safe while we hold the BGL.
    # Otherwise the new lock would have to be added in acquired mode.
    assert self.REQ_BGL
    assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER)
    self.glm.remove(locking.LEVEL_INSTANCE, old_name)
    self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)

    # re-read the instance from the configuration after rename
    renamed_inst = self.cfg.GetInstanceInfo(self.instance.uuid)

    if rename_file_storage:
      new_file_storage_dir = os.path.dirname(
                               renamed_inst.disks[0].logical_id[1])
      result = self.rpc.call_file_storage_dir_rename(renamed_inst.primary_node,
                                                     old_file_storage_dir,
                                                     new_file_storage_dir)
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
                   " (but the instance has been renamed in Ganeti)" %
                   (self.cfg.GetNodeName(renamed_inst.primary_node),
                    old_file_storage_dir, new_file_storage_dir))

    StartInstanceDisks(self, renamed_inst, None)
    # update info on disks
    info = GetInstanceInfoText(renamed_inst)
    for (idx, disk) in enumerate(renamed_inst.disks):
      for node_uuid in renamed_inst.all_nodes:
        self.cfg.SetDiskID(disk, node_uuid)
        result = self.rpc.call_blockdev_setinfo(node_uuid, disk, info)
        result.Warn("Error setting info on node %s for disk %s" %
                    (self.cfg.GetNodeName(node_uuid), idx), self.LogWarning)
    try:
      result = self.rpc.call_instance_run_rename(renamed_inst.primary_node,
                                                 renamed_inst, old_name,
                                                 self.op.debug_level)
      result.Warn("Could not run OS rename script for instance %s on node %s"
                  " (but the instance has been renamed in Ganeti)" %
                  (renamed_inst.name,
                   self.cfg.GetNodeName(renamed_inst.primary_node)),
                  self.LogWarning)
    finally:
      ShutdownInstanceDisks(self, renamed_inst)

    return renamed_inst.name


class LUInstanceRemove(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()
    elif level == locking.LEVEL_NODE_RES:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = BuildInstanceHookEnvByObject(self, self.instance)
    env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()]
    nl_post = list(self.instance.all_nodes) + nl
    return (nl, nl_post)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Remove the instance.

    """
    logging.info("Shutting down instance %s on node %s", self.instance.name,
                 self.cfg.GetNodeName(self.instance.primary_node))

    result = self.rpc.call_instance_shutdown(self.instance.primary_node,
                                             self.instance,
                                             self.op.shutdown_timeout,
                                             self.op.reason)
    if self.op.ignore_failures:
      result.Warn("Warning: can't shutdown instance", feedback_fn)
    else:
      result.Raise("Could not shutdown instance %s on node %s" %
                   (self.instance.name,
                    self.cfg.GetNodeName(self.instance.primary_node)))

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES))
    assert not (set(self.instance.all_nodes) -
                self.owned_locks(locking.LEVEL_NODE)), \
      "Not owning correct locks"

    RemoveInstance(self, feedback_fn, self.instance, self.op.ignore_failures)


class LUInstanceMove(LogicalUnit):
  """Move an instance by data-copying.

  """
  HPATH = "instance-move"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    (self.op.target_node_uuid, self.op.target_node) = \
      ExpandNodeUuidAndName(self.cfg, self.op.target_node_uuid,
                            self.op.target_node)
    self.needed_locks[locking.LEVEL_NODE] = [self.op.target_node]
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes(primary_only=True)
    elif level == locking.LEVEL_NODE_RES:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "TARGET_NODE": self.op.target_node,
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
      }
    env.update(BuildInstanceHookEnvByObject(self, self.instance))
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [
      self.cfg.GetMasterNode(),
      self.instance.primary_node,
      self.op.target_node_uuid,
      ]
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    if self.instance.disk_template not in constants.DTS_COPYABLE:
      raise errors.OpPrereqError("Disk template %s not suitable for copying" %
                                 self.instance.disk_template,
                                 errors.ECODE_STATE)

    target_node = self.cfg.GetNodeInfo(self.op.target_node_uuid)
    assert target_node is not None, \
      "Cannot retrieve locked node %s" % self.op.target_node

    self.target_node_uuid = target_node.uuid
    if target_node.uuid == self.instance.primary_node:
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
                                 (self.instance.name, target_node.name),
                                 errors.ECODE_STATE)

    bep = self.cfg.GetClusterInfo().FillBE(self.instance)

    for idx, dsk in enumerate(self.instance.disks):
      if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
                                   " cannot copy" % idx, errors.ECODE_STATE)

    CheckNodeOnline(self, target_node.uuid)
    CheckNodeNotDrained(self, target_node.uuid)
    CheckNodeVmCapable(self, target_node.uuid)
    cluster = self.cfg.GetClusterInfo()
    group_info = self.cfg.GetNodeGroup(target_node.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
    CheckTargetNodeIPolicy(self, ipolicy, self.instance, target_node, self.cfg,
                           ignore=self.op.ignore_ipolicy)

    if self.instance.admin_state == constants.ADMINST_UP:
      # check memory requirements on the secondary node
      CheckNodeFreeMemory(
          self, target_node.uuid, "failing over instance %s" %
          self.instance.name, bep[constants.BE_MAXMEM],
          self.instance.hypervisor,
          self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])
    else:
      self.LogInfo("Not checking memory on the secondary node as"
                   " instance will not be started")

    # check bridge existence
    CheckInstanceBridgesExist(self, self.instance, node_uuid=target_node.uuid)

  def Exec(self, feedback_fn):
    """Move an instance.

    The move is done by shutting it down on its present node, copying
    the data over (slow) and starting it on the new node.

    """
    source_node = self.cfg.GetNodeInfo(self.instance.primary_node)
    target_node = self.cfg.GetNodeInfo(self.target_node_uuid)

    self.LogInfo("Shutting down instance %s on source node %s",
                 self.instance.name, source_node.name)

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES))

    result = self.rpc.call_instance_shutdown(source_node.uuid, self.instance,
                                             self.op.shutdown_timeout,
                                             self.op.reason)
    if self.op.ignore_consistency:
      result.Warn("Could not shutdown instance %s on node %s. Proceeding"
                  " anyway. Please make sure node %s is down. Error details" %
                  (self.instance.name, source_node.name, source_node.name),
                  self.LogWarning)
    else:
      result.Raise("Could not shutdown instance %s on node %s" %
                   (self.instance.name, source_node.name))

    # create the target disks
    try:
      CreateDisks(self, self.instance, target_node_uuid=target_node.uuid)
    except errors.OpExecError:
      self.LogWarning("Device creation failed")
      self.cfg.ReleaseDRBDMinors(self.instance.uuid)
      raise

    cluster_name = self.cfg.GetClusterInfo().cluster_name

    errs = []
    # activate, get path, copy the data over
    for idx, disk in enumerate(self.instance.disks):
      self.LogInfo("Copying data for disk %d", idx)
      result = self.rpc.call_blockdev_assemble(
                 target_node.uuid, (disk, self.instance), self.instance.name,
                 True, idx)
      if result.fail_msg:
        self.LogWarning("Can't assemble newly created disk %d: %s",
                        idx, result.fail_msg)
        errs.append(result.fail_msg)
        break
      dev_path = result.payload
      result = self.rpc.call_blockdev_export(source_node.uuid, (disk,
                                                                self.instance),
                                             target_node.name, dev_path,
                                             cluster_name)
      if result.fail_msg:
        self.LogWarning("Can't copy data over for disk %d: %s",
                        idx, result.fail_msg)
        errs.append(result.fail_msg)
        break

    if errs:
      self.LogWarning("Some disks failed to copy, aborting")
      try:
        RemoveDisks(self, self.instance, target_node_uuid=target_node.uuid)
      finally:
        self.cfg.ReleaseDRBDMinors(self.instance.uuid)
        raise errors.OpExecError("Errors during disk copy: %s" %
                                 (",".join(errs),))

    self.instance.primary_node = target_node.uuid
    self.cfg.Update(self.instance, feedback_fn)

    self.LogInfo("Removing the disks on the original node")
    RemoveDisks(self, self.instance, target_node_uuid=source_node.uuid)

    # Only start the instance if it's marked as up
    if self.instance.admin_state == constants.ADMINST_UP:
      self.LogInfo("Starting instance %s on node %s",
                   self.instance.name, target_node.name)

      disks_ok, _ = AssembleInstanceDisks(self, self.instance,
                                          ignore_secondaries=True)
      if not disks_ok:
        ShutdownInstanceDisks(self, self.instance)
        raise errors.OpExecError("Can't activate the instance's disks")

      result = self.rpc.call_instance_start(target_node.uuid,
                                            (self.instance, None, None), False,
                                            self.op.reason)
      msg = result.fail_msg
      if msg:
        ShutdownInstanceDisks(self, self.instance)
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
                                 (self.instance.name, target_node.name, msg))


class LUInstanceMultiAlloc(NoHooksLU):
  """Allocates multiple instances at the same time.

  """
  REQ_BGL = False

  def CheckArguments(self):
    """Check arguments.

    """
    nodes = []
    for inst in self.op.instances:
      if inst.iallocator is not None:
        raise errors.OpPrereqError("iallocator is not allowed to be set on"
                                   " instance objects", errors.ECODE_INVAL)
      nodes.append(bool(inst.pnode))
      if inst.disk_template in constants.DTS_INT_MIRROR:
        nodes.append(bool(inst.snode))

    has_nodes = compat.any(nodes)
    if compat.all(nodes) ^ has_nodes:
      raise errors.OpPrereqError("There are instance objects providing"
                                 " pnode/snode while others do not",
                                 errors.ECODE_INVAL)

    if self.op.iallocator is None:
      default_iallocator = self.cfg.GetDefaultIAllocator()
      if default_iallocator and has_nodes:
        self.op.iallocator = default_iallocator
      else:
        raise errors.OpPrereqError("No iallocator or nodes on the instances"
                                   " given and no cluster-wide default"
                                   " iallocator found; please specify either"
                                   " an iallocator or nodes on the instances"
                                   " or set a cluster-wide default iallocator",
                                   errors.ECODE_INVAL)

    _CheckOpportunisticLocking(self.op)

    dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
    if dups:
      raise errors.OpPrereqError("There are duplicate instance names: %s" %
                                 utils.CommaJoin(dups), errors.ECODE_INVAL)

  def ExpandNames(self):
    """Calculate the locks.

    """
    self.share_locks = ShareAll()
    self.needed_locks = {
      # iallocator will select nodes and even if no iallocator is used,
      # collisions with LUInstanceCreate should be avoided
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
      }

    if self.op.iallocator:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
      self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET

      if self.op.opportunistic_locking:
        self.opportunistic_locks[locking.LEVEL_NODE] = True
        self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
    else:
      nodeslist = []
      for inst in self.op.instances:
        (inst.pnode_uuid, inst.pnode) = \
          ExpandNodeUuidAndName(self.cfg, inst.pnode_uuid, inst.pnode)
        nodeslist.append(inst.pnode)
        if inst.snode is not None:
          (inst.snode_uuid, inst.snode) = \
            ExpandNodeUuidAndName(self.cfg, inst.snode_uuid, inst.snode)
          nodeslist.append(inst.snode)

      self.needed_locks[locking.LEVEL_NODE] = nodeslist
      # Lock resources of instance's primary and secondary nodes (copy to
      # prevent accidental modification)
      self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)

  def CheckPrereq(self):
    """Check prerequisite.

    """
    cluster = self.cfg.GetClusterInfo()
    default_vg = self.cfg.GetVGName()
    ec_id = self.proc.GetECId()

    if self.op.opportunistic_locking:
      # Only consider nodes for which a lock is held
      node_whitelist = self.cfg.GetNodeNames(
                         list(self.owned_locks(locking.LEVEL_NODE)))
    else:
      node_whitelist = None

    insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
                                         _ComputeNics(op, cluster, None,
                                                      self.cfg, ec_id),
                                         _ComputeFullBeParams(op, cluster),
                                         node_whitelist)
             for op in self.op.instances]

    req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" %
                                 (self.op.iallocator, ial.info),
                                 errors.ECODE_NORES)

    self.ia_result = ial.result

    if self.op.dry_run:
      self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
        constants.JOB_IDS_KEY: [],
        })

  def _ConstructPartialResult(self):
    """Constructs the partial result.

    """
    (allocatable, failed) = self.ia_result
    return {
      constants.ALLOCATABLE_KEY: map(compat.fst, allocatable),
      constants.FAILED_KEY: failed,
      }

  def Exec(self, feedback_fn):
    """Executes the opcode.

    """
    op2inst = dict((op.instance_name, op) for op in self.op.instances)
    (allocatable, failed) = self.ia_result

    jobs = []
    for (name, node_names) in allocatable:
      op = op2inst.pop(name)

      (op.pnode_uuid, op.pnode) = \
        ExpandNodeUuidAndName(self.cfg, None, node_names[0])
      if len(node_names) > 1:
        (op.snode_uuid, op.snode) = \
          ExpandNodeUuidAndName(self.cfg, None, node_names[1])

      jobs.append([op])

    missing = set(op2inst.keys()) - set(failed)
    assert not missing, \
      "Iallocator did return incomplete result: %s" % utils.CommaJoin(missing)

    return ResultWithJobs(jobs, **self._ConstructPartialResult())


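# Illustrative sketch (not part of the original module, added for
# clarification): the partial result built by
# LUInstanceMultiAlloc._ConstructPartialResult is a plain dict that pairs the
# instances the iallocator could place with those it could not. Assuming the
# usual string values behind constants.ALLOCATABLE_KEY and
# constants.FAILED_KEY, it would look roughly like:
#
#   {
#     "allocatable": ["inst1.example.com", "inst2.example.com"],
#     "failed": ["inst3.example.com"],
#   }
#
# Exec() then wraps one creation opcode per allocatable instance in a
# ResultWithJobs, so the actual instance creations run as follow-up jobs.
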
class _InstNicModPrivate:
  """Data structure for network interface modifications.

  Used by L{LUInstanceSetParams}.

  """
  def __init__(self):
    self.params = None
    self.filled = None


def _PrepareContainerMods(mods, private_fn):
  """Prepares a list of container modifications by adding a private data field.

  @type mods: list of tuples; (operation, index, parameters)
  @param mods: List of modifications
  @type private_fn: callable or None
  @param private_fn: Callable for constructing a private data field for a
    modification
  @rtype: list

  """
  if private_fn is None:
    fn = lambda: None
  else:
    fn = private_fn

  return [(op, idx, params, fn()) for (op, idx, params) in mods]


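# Illustrative sketch (not part of the original module, added for
# clarification): _PrepareContainerMods simply extends each
# (op, index, params) tuple with a freshly constructed private object, e.g.:
#
#   mods = [(constants.DDM_ADD, -1, {"link": "br0"}),
#           (constants.DDM_MODIFY, 0, {"ip": "none"})]
#   prepared = _PrepareContainerMods(mods, _InstNicModPrivate)
#   # -> [(DDM_ADD, -1, {...}, <_InstNicModPrivate>),
#   #     (DDM_MODIFY, 0, {...}, <_InstNicModPrivate>)]
#
# Passing private_fn=None attaches None instead of a private object.
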
def _CheckNodesPhysicalCPUs(lu, node_uuids, requested, hypervisor_specs):
  """Checks if nodes have enough physical CPUs

  This function checks if all given nodes have the needed number of
  physical CPUs. In case any node has fewer CPUs or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type node_uuids: C{list}
  @param node_uuids: the list of node UUIDs to check
  @type requested: C{int}
  @param requested: the minimum acceptable number of physical CPUs
  @type hypervisor_specs: list of pairs (string, dict of strings)
  @param hypervisor_specs: list of hypervisor specifications in
      pairs (hypervisor_name, hvparams)
  @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
      or we cannot check the node

  """
  nodeinfo = lu.rpc.call_node_info(node_uuids, None, hypervisor_specs)
  for node_uuid in node_uuids:
    info = nodeinfo[node_uuid]
    node_name = lu.cfg.GetNodeName(node_uuid)
    info.Raise("Cannot get current information from node %s" % node_name,
               prereq=True, ecode=errors.ECODE_ENVIRON)
    (_, _, (hv_info, )) = info.payload
    num_cpus = hv_info.get("cpu_total", None)
    if not isinstance(num_cpus, int):
      raise errors.OpPrereqError("Can't compute the number of physical CPUs"
                                 " on node %s, result was '%s'" %
                                 (node_name, num_cpus), errors.ECODE_ENVIRON)
    if requested > num_cpus:
      raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
                                 "required" % (node_name, num_cpus, requested),
                                 errors.ECODE_NORES)


def GetItemFromContainer(identifier, kind, container):
  """Return the item referred to by the identifier.

  @type identifier: string
  @param identifier: Item index or name or UUID
  @type kind: string
  @param kind: One-word item description
  @type container: list
  @param container: Container to get the item from

  """
  # Index
  try:
    idx = int(identifier)
    if idx == -1:
      # Append
      absidx = len(container) - 1
    elif idx < 0:
      raise IndexError("Not accepting negative indices other than -1")
    elif idx > len(container):
      raise IndexError("Got %s index %s, but there are only %s" %
                       (kind, idx, len(container)))
    else:
      absidx = idx
    return (absidx, container[idx])
  except ValueError:
    pass

  for idx, item in enumerate(container):
    if item.uuid == identifier or item.name == identifier:
      return (idx, item)

  raise errors.OpPrereqError("Cannot find %s with identifier %s" %
                             (kind, identifier), errors.ECODE_NOENT)


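# Illustrative sketch (not part of the original module, added for
# clarification): identifiers accepted by GetItemFromContainer. For a
# container of NIC or disk objects the lookup order is numeric index first,
# then a name/UUID match:
#
#   GetItemFromContainer("0", "nic", instance.nics)    # -> (0, nics[0])
#   GetItemFromContainer("-1", "nic", instance.nics)   # -> last NIC
#   GetItemFromContainer("eth-front", "nic", instance.nics)   # by name
#   GetItemFromContainer("d2fae6fe-...", "disk", instance.disks)  # by UUID
#
# Anything that is neither a valid index nor a known name/UUID raises
# OpPrereqError with ECODE_NOENT.
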
def _ApplyContainerMods(kind, container, chgdesc, mods,
                        create_fn, modify_fn, remove_fn):
  """Applies descriptions in C{mods} to C{container}.

  @type kind: string
  @param kind: One-word item description
  @type container: list
  @param container: Container to modify
  @type chgdesc: None or list
  @param chgdesc: List of applied changes
  @type mods: list
  @param mods: Modifications as returned by L{_PrepareContainerMods}
  @type create_fn: callable
  @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
    receives absolute item index, parameters and private data object as added
    by L{_PrepareContainerMods}, returns tuple containing new item and changes
    as list
  @type modify_fn: callable
  @param modify_fn: Callback for modifying an existing item
    (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
    and private data object as added by L{_PrepareContainerMods}, returns
    changes as list
  @type remove_fn: callable
  @param remove_fn: Callback on removing item; receives absolute item index,
    item and private data object as added by L{_PrepareContainerMods}

  """
  for (op, identifier, params, private) in mods:
    changes = None

    if op == constants.DDM_ADD:
      # Calculate where item will be added
      # When adding an item, identifier can only be an index
      try:
        idx = int(identifier)
      except ValueError:
        raise errors.OpPrereqError("Only positive integer or -1 is accepted as"
                                   " identifier for %s" % constants.DDM_ADD,
                                   errors.ECODE_INVAL)
      if idx == -1:
        addidx = len(container)
      else:
        if idx < 0:
          raise IndexError("Not accepting negative indices other than -1")
        elif idx > len(container):
          raise IndexError("Got %s index %s, but there are only %s" %
                           (kind, idx, len(container)))
        addidx = idx

      if create_fn is None:
        item = params
      else:
        (item, changes) = create_fn(addidx, params, private)

      if idx == -1:
        container.append(item)
      else:
        assert idx >= 0
        assert idx <= len(container)
        # list.insert does so before the specified index
        container.insert(idx, item)
    else:
      # Retrieve existing item
      (absidx, item) = GetItemFromContainer(identifier, kind, container)

      if op == constants.DDM_REMOVE:
        assert not params

        if remove_fn is not None:
          remove_fn(absidx, item, private)

        changes = [("%s/%s" % (kind, absidx), "remove")]

        assert container[absidx] == item
        del container[absidx]
      elif op == constants.DDM_MODIFY:
        if modify_fn is not None:
          changes = modify_fn(absidx, item, params, private)
      else:
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)

    assert _TApplyContModsCbChanges(changes)

    if not (chgdesc is None or changes is None):
      chgdesc.extend(changes)


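# Illustrative sketch (not part of the original module, added for
# clarification): how the container helpers above fit together for a
# hypothetical list of NIC objects.
#
#   chgdesc = []
#   mods = _PrepareContainerMods([(constants.DDM_MODIFY, 0, {"ip": "none"})],
#                                _InstNicModPrivate)
#   _ApplyContainerMods("nic", instance.nics, chgdesc, mods,
#                       create_fn, modify_fn, remove_fn)
#
# Each callback returns a list of (description, value) pairs, as checked by
# _TApplyContModsCbChanges; they are appended to chgdesc, which the LU later
# reports back to the caller.
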
def _UpdateIvNames(base_index, disks):
  """Updates the C{iv_name} attribute of disks.

  @type disks: list of L{objects.Disk}

  """
  for (idx, disk) in enumerate(disks):
    disk.iv_name = "disk/%s" % (base_index + idx, )


class LUInstanceSetParams(LogicalUnit):
  """Modifies an instance's parameters.

  """
  HPATH = "instance-modify"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  @staticmethod
  def _UpgradeDiskNicMods(kind, mods, verify_fn):
    assert ht.TList(mods)
    assert not mods or len(mods[0]) in (2, 3)

    if mods and len(mods[0]) == 2:
      result = []

      addremove = 0
      for op, params in mods:
        if op in (constants.DDM_ADD, constants.DDM_REMOVE):
          result.append((op, -1, params))
          addremove += 1

          if addremove > 1:
            raise errors.OpPrereqError("Only one %s add or remove operation is"
                                       " supported at a time" % kind,
                                       errors.ECODE_INVAL)
        else:
          result.append((constants.DDM_MODIFY, op, params))

      assert verify_fn(result)
    else:
      result = mods

    return result

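  # Illustrative sketch (not part of the original module, added for
  # clarification): the legacy 2-tuple syntax accepted by _UpgradeDiskNicMods
  # and its upgraded 3-tuple form, assuming a suitable verify_fn.
  #
  #   old = [(constants.DDM_ADD, {"size": 1024}), (0, {"mode": "ro"})]
  #   new = LUInstanceSetParams._UpgradeDiskNicMods("disk", old, verify_fn)
  #   # -> [(DDM_ADD, -1, {"size": 1024}), (DDM_MODIFY, 0, {"mode": "ro"})]
  #
  # Entries already given as 3-tuples are passed through unchanged.
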
  @staticmethod
  def _CheckMods(kind, mods, key_types, item_fn):
    """Ensures requested disk/NIC modifications are valid.

    """
    for (op, _, params) in mods:
      assert ht.TDict(params)

      # If 'key_types' is an empty dict, we assume we have an
      # 'ext' template and thus do not ForceDictType
      if key_types:
        utils.ForceDictType(params, key_types)

      if op == constants.DDM_REMOVE:
        if params:
          raise errors.OpPrereqError("No settings should be passed when"
                                     " removing a %s" % kind,
                                     errors.ECODE_INVAL)
      elif op in (constants.DDM_ADD, constants.DDM_MODIFY):
        item_fn(op, params)
      else:
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)

  @staticmethod
  def _VerifyDiskModification(op, params, excl_stor):
    """Verifies a disk modification.

    """
    if op == constants.DDM_ADD:
      mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
      if mode not in constants.DISK_ACCESS_SET:
        raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
                                   errors.ECODE_INVAL)

      size = params.get(constants.IDISK_SIZE, None)
      if size is None:
        raise errors.OpPrereqError("Required disk parameter '%s' missing" %
                                   constants.IDISK_SIZE, errors.ECODE_INVAL)

      try:
        size = int(size)
      except (TypeError, ValueError), err:
        raise errors.OpPrereqError("Invalid disk size parameter: %s" % err,
                                   errors.ECODE_INVAL)

      params[constants.IDISK_SIZE] = size
      name = params.get(constants.IDISK_NAME, None)
      if name is not None and name.lower() == constants.VALUE_NONE:
        params[constants.IDISK_NAME] = None

      CheckSpindlesExclusiveStorage(params, excl_stor, True)

    elif op == constants.DDM_MODIFY:
      if constants.IDISK_SIZE in params:
        raise errors.OpPrereqError("Disk size change not possible, use"
                                   " grow-disk", errors.ECODE_INVAL)
      if len(params) > 2:
        raise errors.OpPrereqError("Disk modification doesn't support"
                                   " additional arbitrary parameters",
                                   errors.ECODE_INVAL)
      name = params.get(constants.IDISK_NAME, None)
      if name is not None and name.lower() == constants.VALUE_NONE:
        params[constants.IDISK_NAME] = None

  @staticmethod
  def _VerifyNicModification(op, params):
    """Verifies a network interface modification.

    """
    if op in (constants.DDM_ADD, constants.DDM_MODIFY):
      ip = params.get(constants.INIC_IP, None)
      name = params.get(constants.INIC_NAME, None)
      req_net = params.get(constants.INIC_NETWORK, None)
      link = params.get(constants.NIC_LINK, None)
      mode = params.get(constants.NIC_MODE, None)
      if name is not None and name.lower() == constants.VALUE_NONE:
        params[constants.INIC_NAME] = None
      if req_net is not None:
        if req_net.lower() == constants.VALUE_NONE:
          params[constants.INIC_NETWORK] = None
          req_net = None
        elif link is not None or mode is not None:
          raise errors.OpPrereqError("If a network is given,"
                                     " mode or link should not be set",
                                     errors.ECODE_INVAL)

      if op == constants.DDM_ADD:
        macaddr = params.get(constants.INIC_MAC, None)
        if macaddr is None:
          params[constants.INIC_MAC] = constants.VALUE_AUTO

      if ip is not None:
        if ip.lower() == constants.VALUE_NONE:
          params[constants.INIC_IP] = None
        else:
          if ip.lower() == constants.NIC_IP_POOL:
            if op == constants.DDM_ADD and req_net is None:
              raise errors.OpPrereqError("If ip=pool, parameter network"
                                         " cannot be none",
                                         errors.ECODE_INVAL)
          else:
            if not netutils.IPAddress.IsValid(ip):
              raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                         errors.ECODE_INVAL)

      if constants.INIC_MAC in params:
        macaddr = params[constants.INIC_MAC]
        if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
          macaddr = utils.NormalizeAndValidateMac(macaddr)

        if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
                                     " modifying an existing NIC",
                                     errors.ECODE_INVAL)

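  # Illustrative sketch (not part of the original module, added for
  # clarification): parameter dicts as they look before and after
  # _VerifyNicModification normalises them.
  #
  #   params = {constants.INIC_IP: "none", constants.INIC_NETWORK: "none"}
  #   LUInstanceSetParams._VerifyNicModification(constants.DDM_MODIFY, params)
  #   # -> {constants.INIC_IP: None, constants.INIC_NETWORK: None}
  #
  # For DDM_ADD a missing MAC is replaced by constants.VALUE_AUTO, and an
  # explicit MAC is validated with utils.NormalizeAndValidateMac.
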
  def CheckArguments(self):
    if not (self.op.nics or self.op.disks or self.op.disk_template or
            self.op.hvparams or self.op.beparams or self.op.os_name or
            self.op.offline is not None or self.op.runtime_mem or
            self.op.pnode):
      raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)

    if self.op.hvparams:
      CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
                           "hypervisor", "instance", "cluster")

    self.op.disks = self._UpgradeDiskNicMods(
      "disk", self.op.disks, ht.TSetParamsMods(ht.TIDiskParams))
    self.op.nics = self._UpgradeDiskNicMods(
      "NIC", self.op.nics, ht.TSetParamsMods(ht.TINicParams))

    if self.op.disks and self.op.disk_template is not None:
      raise errors.OpPrereqError("Disk template conversion and other disk"
                                 " changes not supported at the same time",
                                 errors.ECODE_INVAL)

    if (self.op.disk_template and
        self.op.disk_template in constants.DTS_INT_MIRROR and
        self.op.remote_node is None):
      raise errors.OpPrereqError("Changing the disk template to a mirrored"
                                 " one requires specifying a secondary node",
                                 errors.ECODE_INVAL)

    # Check NIC modifications
    self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
                    self._VerifyNicModification)

    if self.op.pnode:
      (self.op.pnode_uuid, self.op.pnode) = \
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODEGROUP] = []
    # Can't even acquire node locks in shared mode as upcoming changes in
    # Ganeti 2.6 will start to modify the node object on disk conversion
    self.needed_locks[locking.LEVEL_NODE] = []
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
    # The node group is needed to look up the ipolicy
    self.share_locks[locking.LEVEL_NODEGROUP] = 1

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
      # Acquire locks for the instance's nodegroups optimistically. Needs
      # to be verified in CheckPrereq
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
        self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
    elif level == locking.LEVEL_NODE:
      self._LockInstancesNodes()
      if self.op.disk_template and self.op.remote_node:
        (self.op.remote_node_uuid, self.op.remote_node) = \
          ExpandNodeUuidAndName(self.cfg, self.op.remote_node_uuid,
                                self.op.remote_node)
        self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node_uuid)
    elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, primary and secondaries.

    """
    args = {}
    if constants.BE_MINMEM in self.be_new:
      args["minmem"] = self.be_new[constants.BE_MINMEM]
    if constants.BE_MAXMEM in self.be_new:
      args["maxmem"] = self.be_new[constants.BE_MAXMEM]
    if constants.BE_VCPUS in self.be_new:
      args["vcpus"] = self.be_new[constants.BE_VCPUS]
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
    # information at all.

    if self._new_nics is not None:
      nics = []

      for nic in self._new_nics:
        n = copy.deepcopy(nic)
        nicparams = self.cluster.SimpleFillNIC(n.nicparams)
        n.nicparams = nicparams
        nics.append(NICToTuple(self, n))

      args["nics"] = nics

    env = BuildInstanceHookEnvByObject(self, self.instance, override=args)
    if self.op.disk_template:
      env["NEW_DISK_TEMPLATE"] = self.op.disk_template
    if self.op.runtime_mem:
      env["RUNTIME_MEMORY"] = self.op.runtime_mem

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)

  def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
                              old_params, cluster, pnode_uuid):

    update_params_dict = dict([(key, params[key])
                               for key in constants.NICS_PARAMETERS
                               if key in params])

    req_link = update_params_dict.get(constants.NIC_LINK, None)
    req_mode = update_params_dict.get(constants.NIC_MODE, None)

    new_net_uuid = None
    new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid)
    if new_net_uuid_or_name:
      new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name)
      new_net_obj = self.cfg.GetNetwork(new_net_uuid)

    if old_net_uuid:
      old_net_obj = self.cfg.GetNetwork(old_net_uuid)

    if new_net_uuid:
      netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode_uuid)
      if not netparams:
        raise errors.OpPrereqError("No netparams found for the network"
                                   " %s, probably not connected" %
                                   new_net_obj.name, errors.ECODE_INVAL)
      new_params = dict(netparams)
    else:
      new_params = GetUpdatedParams(old_params, update_params_dict)

    utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)

    new_filled_params = cluster.SimpleFillNIC(new_params)
    objects.NIC.CheckParameterSyntax(new_filled_params)

    new_mode = new_filled_params[constants.NIC_MODE]
    if new_mode == constants.NIC_MODE_BRIDGED:
      bridge = new_filled_params[constants.NIC_LINK]
      msg = self.rpc.call_bridges_exist(pnode_uuid, [bridge]).fail_msg
      if msg:
        msg = "Error checking bridges on node '%s': %s" % \
                (self.cfg.GetNodeName(pnode_uuid), msg)
        if self.op.force:
          self.warn.append(msg)
        else:
          raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)

    elif new_mode == constants.NIC_MODE_ROUTED:
      ip = params.get(constants.INIC_IP, old_ip)
      if ip is None:
        raise errors.OpPrereqError("Cannot set the NIC IP address to None"
                                   " on a routed NIC", errors.ECODE_INVAL)

    elif new_mode == constants.NIC_MODE_OVS:
      # TODO: check OVS link
      self.LogInfo("OVS links are currently not checked for correctness")

    if constants.INIC_MAC in params:
      mac = params[constants.INIC_MAC]
      if mac is None:
        raise errors.OpPrereqError("Cannot unset the NIC MAC address",
                                   errors.ECODE_INVAL)
      elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        # otherwise generate the MAC address
        params[constants.INIC_MAC] = \
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
      else:
        # or validate/reserve the current one
        try:
          self.cfg.ReserveMAC(mac, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("MAC address '%s' already in use"
                                     " in cluster" % mac,
                                     errors.ECODE_NOTUNIQUE)
    elif new_net_uuid != old_net_uuid:

      def get_net_prefix(net_uuid):
        mac_prefix = None
        if net_uuid:
          nobj = self.cfg.GetNetwork(net_uuid)
          mac_prefix = nobj.mac_prefix

        return mac_prefix

      new_prefix = get_net_prefix(new_net_uuid)
      old_prefix = get_net_prefix(old_net_uuid)
      if old_prefix != new_prefix:
        params[constants.INIC_MAC] = \
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())

    # if there is a change in (ip, network) tuple
    new_ip = params.get(constants.INIC_IP, old_ip)
    if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
      if new_ip:
        # if IP is pool then require a network and generate one IP
        if new_ip.lower() == constants.NIC_IP_POOL:
          if new_net_uuid:
            try:
              new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("Unable to get a free IP"
                                         " from the address pool",
                                         errors.ECODE_STATE)
            self.LogInfo("Chose IP %s from network %s",
                         new_ip,
                         new_net_obj.name)
            params[constants.INIC_IP] = new_ip
          else:
            raise errors.OpPrereqError("ip=pool, but no network found",
                                       errors.ECODE_INVAL)
        # Reserve the new IP if it belongs to the new network, if any
        elif new_net_uuid:
          try:
            self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId())
            self.LogInfo("Reserving IP %s in network %s",
                         new_ip, new_net_obj.name)
          except errors.ReservationError:
            raise errors.OpPrereqError("IP %s not available in network %s" %
                                       (new_ip, new_net_obj.name),
                                       errors.ECODE_NOTUNIQUE)
        # new network is None so check if new IP is a conflicting IP
        elif self.op.conflicts_check:
          _CheckForConflictingIp(self, new_ip, pnode_uuid)

    
2607
      # release old IP if old network is not None
2608
      if old_ip and old_net_uuid:
2609
        try:
2610
          self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId())
2611
        except errors.AddressPoolError:
2612
          logging.warning("Release IP %s not contained in network %s",
2613
                          old_ip, old_net_obj.name)
2614

    
2615
    # there are no changes in (ip, network) tuple and old network is not None
2616
    elif (old_net_uuid is not None and
2617
          (req_link is not None or req_mode is not None)):
2618
      raise errors.OpPrereqError("Not allowed to change link or mode of"
2619
                                 " a NIC that is connected to a network",
2620
                                 errors.ECODE_INVAL)
2621

    
2622
    private.params = new_params
2623
    private.filled = new_filled_params
2624

    
2625
  def _PreCheckDiskTemplate(self, pnode_info):
2626
    """CheckPrereq checks related to a new disk template."""
2627
    # Arguments are passed to avoid configuration lookups
2628
    pnode_uuid = self.instance.primary_node
2629
    if self.instance.disk_template == self.op.disk_template:
2630
      raise errors.OpPrereqError("Instance already has disk template %s" %
2631
                                 self.instance.disk_template,
2632
                                 errors.ECODE_INVAL)
2633

    
2634
    if not self.cluster.IsDiskTemplateEnabled(self.instance.disk_template):
2635
      raise errors.OpPrereqError("Disk template '%s' is not enabled for this"
2636
                                 " cluster." % self.instance.disk_template)
2637

    
2638
    if (self.instance.disk_template,
2639
        self.op.disk_template) not in self._DISK_CONVERSIONS:
2640
      raise errors.OpPrereqError("Unsupported disk template conversion from"
2641
                                 " %s to %s" % (self.instance.disk_template,
2642
                                                self.op.disk_template),
2643
                                 errors.ECODE_INVAL)
2644
    CheckInstanceState(self, self.instance, INSTANCE_DOWN,
2645
                       msg="cannot change disk template")
2646
    if self.op.disk_template in constants.DTS_INT_MIRROR:
2647
      if self.op.remote_node_uuid == pnode_uuid:
2648
        raise errors.OpPrereqError("Given new secondary node %s is the same"
2649
                                   " as the primary node of the instance" %
2650
                                   self.op.remote_node, errors.ECODE_STATE)
2651
      CheckNodeOnline(self, self.op.remote_node_uuid)
2652
      CheckNodeNotDrained(self, self.op.remote_node_uuid)
2653
      # FIXME: here we assume that the old instance type is DT_PLAIN
2654
      assert self.instance.disk_template == constants.DT_PLAIN
2655
      disks = [{constants.IDISK_SIZE: d.size,
2656
                constants.IDISK_VG: d.logical_id[0]}
2657
               for d in self.instance.disks]
2658
      required = ComputeDiskSizePerVG(self.op.disk_template, disks)
2659
      CheckNodesFreeDiskPerVG(self, [self.op.remote_node_uuid], required)
2660

    
2661
      snode_info = self.cfg.GetNodeInfo(self.op.remote_node_uuid)
2662
      snode_group = self.cfg.GetNodeGroup(snode_info.group)
2663
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
2664
                                                              snode_group)
2665
      CheckTargetNodeIPolicy(self, ipolicy, self.instance, snode_info, self.cfg,
2666
                             ignore=self.op.ignore_ipolicy)
2667
      if pnode_info.group != snode_info.group:
2668
        self.LogWarning("The primary and secondary nodes are in two"
2669
                        " different node groups; the disk parameters"
2670
                        " from the first disk's node group will be"
2671
                        " used")
2672

    
2673
    if not self.op.disk_template in constants.DTS_EXCL_STORAGE:
2674
      # Make sure none of the nodes require exclusive storage
2675
      nodes = [pnode_info]
2676
      if self.op.disk_template in constants.DTS_INT_MIRROR:
2677
        assert snode_info
2678
        nodes.append(snode_info)
2679
      has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
2680
      if compat.any(map(has_es, nodes)):
2681
        errmsg = ("Cannot convert disk template from %s to %s when exclusive"
2682
                  " storage is enabled" % (self.instance.disk_template,
2683
                                           self.op.disk_template))
2684
        raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)
2685

    
2686
  def _PreCheckDisks(self, ispec):
2687
    """CheckPrereq checks related to disk changes.
2688

2689
    @type ispec: dict
2690
    @param ispec: instance specs to be updated with the new disks
2691

2692
    """
2693
    self.diskparams = self.cfg.GetInstanceDiskParams(self.instance)
2694

    
2695
    excl_stor = compat.any(
2696
      rpc.GetExclusiveStorageForNodes(self.cfg,
2697
                                      self.instance.all_nodes).values()
2698
      )
2699

    
2700
    # Check disk modifications. This is done here and not in CheckArguments
2701
    # (as with NICs), because we need to know the instance's disk template
2702
    ver_fn = lambda op, par: self._VerifyDiskModification(op, par, excl_stor)
2703
    if self.instance.disk_template == constants.DT_EXT:
2704
      self._CheckMods("disk", self.op.disks, {}, ver_fn)
2705
    else:
2706
      self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
2707
                      ver_fn)
2708

    
2709
    self.diskmod = _PrepareContainerMods(self.op.disks, None)
2710

    
2711
    # Check the validity of the `provider' parameter
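    # (e.g. a hypothetical "--disk add:size=2g,provider=pvdr1" must name a
    # provider for ExtStorage instances, and must not for other templates)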
    if self.instance.disk_template in constants.DT_EXT:
      for mod in self.diskmod:
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
        if mod[0] == constants.DDM_ADD:
          if ext_provider is None:
            raise errors.OpPrereqError("Instance template is '%s' and parameter"
                                       " '%s' missing, during disk add" %
                                       (constants.DT_EXT,
                                        constants.IDISK_PROVIDER),
                                       errors.ECODE_NOENT)
        elif mod[0] == constants.DDM_MODIFY:
          if ext_provider:
            raise errors.OpPrereqError("Parameter '%s' is invalid during disk"
                                       " modification" %
                                       constants.IDISK_PROVIDER,
                                       errors.ECODE_INVAL)
    else:
      for mod in self.diskmod:
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
        if ext_provider is not None:
          raise errors.OpPrereqError("Parameter '%s' is only valid for"
                                     " instances of type '%s'" %
                                     (constants.IDISK_PROVIDER,
                                      constants.DT_EXT),
                                     errors.ECODE_INVAL)

    if self.op.disks and self.instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Disk operations not supported for"
                                 " diskless instances", errors.ECODE_INVAL)

    def _PrepareDiskMod(_, disk, params, __):
      disk.name = params.get(constants.IDISK_NAME, None)

    # Verify disk changes (operating on a copy)
    disks = copy.deepcopy(self.instance.disks)
    _ApplyContainerMods("disk", disks, None, self.diskmod, None,
                        _PrepareDiskMod, None)
    utils.ValidateDeviceNames("disk", disks)
    if len(disks) > constants.MAX_DISKS:
      raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
                                 " more" % constants.MAX_DISKS,
                                 errors.ECODE_STATE)
    disk_sizes = [disk.size for disk in self.instance.disks]
    disk_sizes.extend(params["size"] for (op, idx, params, private) in
                      self.diskmod if op == constants.DDM_ADD)
    ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
    ispec[constants.ISPEC_DISK_SIZE] = disk_sizes

    if self.op.offline is not None and self.op.offline:
      CheckInstanceState(self, self.instance, CAN_CHANGE_INSTANCE_OFFLINE,
                         msg="can't change to offline")

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the instance list against the existing names.

    """
    assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    self.cluster = self.cfg.GetClusterInfo()

    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    pnode_uuid = self.instance.primary_node

    self.warn = []

    if (self.op.pnode_uuid is not None and self.op.pnode_uuid != pnode_uuid and
        not self.op.force):
      # verify that the instance is not up
      instance_info = self.rpc.call_instance_info(
          pnode_uuid, self.instance.name, self.instance.hypervisor,
          self.instance.hvparams)
      if instance_info.fail_msg:
        self.warn.append("Can't get instance runtime information: %s" %
                         instance_info.fail_msg)
      elif instance_info.payload:
        raise errors.OpPrereqError("Instance is still running on %s" %
                                   self.cfg.GetNodeName(pnode_uuid),
                                   errors.ECODE_STATE)

    assert pnode_uuid in self.owned_locks(locking.LEVEL_NODE)
    node_uuids = list(self.instance.all_nodes)
    pnode_info = self.cfg.GetNodeInfo(pnode_uuid)

    #_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
    assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
    group_info = self.cfg.GetNodeGroup(pnode_info.group)

    # dictionary with instance information after the modification
    ispec = {}

    # Prepare NIC modifications
    self.nicmod = _PrepareContainerMods(self.op.nics, _InstNicModPrivate)

    # OS change
    if self.op.os_name and not self.op.force:
      CheckNodeHasOS(self, self.instance.primary_node, self.op.os_name,
                     self.op.force_variant)
      instance_os = self.op.os_name
    else:
      instance_os = self.instance.os

    assert not (self.op.disk_template and self.op.disks), \
      "Can't modify disk template and apply disk changes at the same time"

    if self.op.disk_template:
      self._PreCheckDiskTemplate(pnode_info)

    self._PreCheckDisks(ispec)

    # hvparams processing
    if self.op.hvparams:
      hv_type = self.instance.hypervisor
      i_hvdict = GetUpdatedParams(self.instance.hvparams, self.op.hvparams)
      utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
      hv_new = self.cluster.SimpleFillHV(hv_type, self.instance.os, i_hvdict)

      # local check
      hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
      CheckHVParams(self, node_uuids, self.instance.hypervisor, hv_new)
      self.hv_proposed = self.hv_new = hv_new # the new actual values
      self.hv_inst = i_hvdict # the new dict (without defaults)
    else:
      self.hv_proposed = self.cluster.SimpleFillHV(self.instance.hypervisor,
                                                   self.instance.os,
                                                   self.instance.hvparams)
      self.hv_new = self.hv_inst = {}

    # beparams processing
    if self.op.beparams:
      i_bedict = GetUpdatedParams(self.instance.beparams, self.op.beparams,
                                  use_none=True)
      objects.UpgradeBeParams(i_bedict)
      utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
      be_new = self.cluster.SimpleFillBE(i_bedict)
      self.be_proposed = self.be_new = be_new # the new actual values
      self.be_inst = i_bedict # the new dict (without defaults)
    else:
      self.be_new = self.be_inst = {}
      self.be_proposed = self.cluster.SimpleFillBE(self.instance.beparams)
    be_old = self.cluster.FillBE(self.instance)

    # CPU param validation -- checking every time a parameter is
    # changed to cover all cases where either CPU mask or vcpus have
    # changed
    if (constants.BE_VCPUS in self.be_proposed and
        constants.HV_CPU_MASK in self.hv_proposed):
      cpu_list = \
        utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
      # Verify mask is consistent with number of vCPUs. Can skip this
      # test if only 1 entry in the CPU mask, which means same mask
      # is applied to all vCPUs.
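      # For example (illustrative values): a mask such as "1:3" has two
      # entries and therefore requires vcpus=2, while "1-3" is a single
      # entry applied to every vCPU.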
      if (len(cpu_list) > 1 and
          len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
        raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
                                   " CPU mask [%s]" %
                                   (self.be_proposed[constants.BE_VCPUS],
                                    self.hv_proposed[constants.HV_CPU_MASK]),
                                   errors.ECODE_INVAL)

      # Only perform this test if a new CPU mask is given
      if constants.HV_CPU_MASK in self.hv_new:
        # Calculate the largest CPU number requested
        max_requested_cpu = max(map(max, cpu_list))
        # Check that all of the instance's nodes have enough physical CPUs to
        # satisfy the requested CPU mask
        hvspecs = [(self.instance.hypervisor,
                    self.cfg.GetClusterInfo()
                      .hvparams[self.instance.hypervisor])]
        _CheckNodesPhysicalCPUs(self, self.instance.all_nodes,
                                max_requested_cpu + 1,
                                hvspecs)

    # osparams processing
    if self.op.osparams:
      i_osdict = GetUpdatedParams(self.instance.osparams, self.op.osparams)
      CheckOSParams(self, True, node_uuids, instance_os, i_osdict)
      self.os_inst = i_osdict # the new dict (without defaults)
    else:
      self.os_inst = {}

    #TODO(dynmem): do the appropriate check involving MINMEM
    if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
        be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
      mem_check_list = [pnode_uuid]
      if be_new[constants.BE_AUTO_BALANCE]:
        # either we changed auto_balance to yes or it was from before
        mem_check_list.extend(self.instance.secondary_nodes)
      instance_info = self.rpc.call_instance_info(
          pnode_uuid, self.instance.name, self.instance.hypervisor,
          self.instance.hvparams)
      hvspecs = [(self.instance.hypervisor,
                  self.cluster.hvparams[self.instance.hypervisor])]
      nodeinfo = self.rpc.call_node_info(mem_check_list, None,
                                         hvspecs)
      pninfo = nodeinfo[pnode_uuid]
      msg = pninfo.fail_msg
      if msg:
        # Assume the primary node is unreachable and go ahead
        self.warn.append("Can't get info from primary node %s: %s" %
                         (self.cfg.GetNodeName(pnode_uuid), msg))
      else:
        (_, _, (pnhvinfo, )) = pninfo.payload
        if not isinstance(pnhvinfo.get("memory_free", None), int):
          self.warn.append("Node data from primary node %s doesn't contain"
                           " free memory information" %
                           self.cfg.GetNodeName(pnode_uuid))
        elif instance_info.fail_msg:
          self.warn.append("Can't get instance runtime information: %s" %
                           instance_info.fail_msg)
        else:
          if instance_info.payload:
            current_mem = int(instance_info.payload["memory"])
          else:
            # Assume instance not running
            # (there is a slight race condition here, but it's not very
            # probable, and we have no other way to check)
            # TODO: Describe race condition
            current_mem = 0
          #TODO(dynmem): do the appropriate check involving MINMEM
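          # Illustrative figures: raising maxmem from 1024 to 4096 MB while
          # the instance currently uses 1024 MB requires at least 3072 MB
          # free on the primary node, otherwise miss_mem becomes positive.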
          miss_mem = (be_new[constants.BE_MAXMEM] - current_mem -
                      pnhvinfo["memory_free"])
          if miss_mem > 0:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from starting, due to %d MB of memory"
                                       " missing on its primary node" %
                                       miss_mem, errors.ECODE_NORES)

      if be_new[constants.BE_AUTO_BALANCE]:
        for node_uuid, nres in nodeinfo.items():
          if node_uuid not in self.instance.secondary_nodes:
            continue
          nres.Raise("Can't get info from secondary node %s" %
                     self.cfg.GetNodeName(node_uuid), prereq=True,
                     ecode=errors.ECODE_STATE)
          (_, _, (nhvinfo, )) = nres.payload
          if not isinstance(nhvinfo.get("memory_free", None), int):
            raise errors.OpPrereqError("Secondary node %s didn't return free"
                                       " memory information" %
                                       self.cfg.GetNodeName(node_uuid),
                                       errors.ECODE_STATE)
          #TODO(dynmem): do the appropriate check involving MINMEM
          elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from failing over to its secondary"
                                       " node %s, due to not enough memory" %
                                       self.cfg.GetNodeName(node_uuid),
                                       errors.ECODE_STATE)

    if self.op.runtime_mem:
      remote_info = self.rpc.call_instance_info(
         self.instance.primary_node, self.instance.name,
         self.instance.hypervisor,
         self.cluster.hvparams[self.instance.hypervisor])
      remote_info.Raise("Error checking node %s" %
                        self.cfg.GetNodeName(self.instance.primary_node))
      if not remote_info.payload: # not running already
        raise errors.OpPrereqError("Instance %s is not running" %
                                   self.instance.name, errors.ECODE_STATE)

      current_memory = remote_info.payload["memory"]
      if (not self.op.force and
           (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
            self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
        raise errors.OpPrereqError("Instance %s must have memory between %d"
                                   " and %d MB unless --force is given" %
                                   (self.instance.name,
                                    self.be_proposed[constants.BE_MINMEM],
                                    self.be_proposed[constants.BE_MAXMEM]),
                                   errors.ECODE_INVAL)

      delta = self.op.runtime_mem - current_memory
      if delta > 0:
        CheckNodeFreeMemory(
            self, self.instance.primary_node,
            "ballooning memory for instance %s" % self.instance.name, delta,
            self.instance.hypervisor,
            self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])

    # make self.cluster visible in the functions below
    cluster = self.cluster

    def _PrepareNicCreate(_, params, private):
      self._PrepareNicModification(params, private, None, None,
                                   {}, cluster, pnode_uuid)
      return (None, None)

    def _PrepareNicMod(_, nic, params, private):
      self._PrepareNicModification(params, private, nic.ip, nic.network,
                                   nic.nicparams, cluster, pnode_uuid)
      return None

    def _PrepareNicRemove(_, params, __):
      ip = params.ip
      net = params.network
      if net is not None and ip is not None:
        self.cfg.ReleaseIp(net, ip, self.proc.GetECId())

    # Verify NIC changes (operating on copy)
    nics = self.instance.nics[:]
    _ApplyContainerMods("NIC", nics, None, self.nicmod,
                        _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
    if len(nics) > constants.MAX_NICS:
      raise errors.OpPrereqError("Instance has too many network interfaces"
                                 " (%d), cannot add more" % constants.MAX_NICS,
                                 errors.ECODE_STATE)

    # Pre-compute NIC changes (necessary to use result in hooks)
    self._nic_chgdesc = []
    if self.nicmod:
      # Operate on copies as this is still in prereq
      nics = [nic.Copy() for nic in self.instance.nics]
      _ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
                          self._CreateNewNic, self._ApplyNicMods, None)
      # Verify that NIC names are unique and valid
      utils.ValidateDeviceNames("NIC", nics)
      self._new_nics = nics
      ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
    else:
      self._new_nics = None
      ispec[constants.ISPEC_NIC_COUNT] = len(self.instance.nics)

    if not self.op.ignore_ipolicy:
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
                                                              group_info)

      # Fill ispec with backend parameters
      ispec[constants.ISPEC_SPINDLE_USE] = \
        self.be_new.get(constants.BE_SPINDLE_USE, None)
      ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS,
                                                         None)

      # Copy ispec to verify parameters with min/max values separately
      if self.op.disk_template:
        new_disk_template = self.op.disk_template
      else:
        new_disk_template = self.instance.disk_template
      ispec_max = ispec.copy()
      ispec_max[constants.ISPEC_MEM_SIZE] = \
        self.be_new.get(constants.BE_MAXMEM, None)
      res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,
                                                     new_disk_template)
      ispec_min = ispec.copy()
      ispec_min[constants.ISPEC_MEM_SIZE] = \
        self.be_new.get(constants.BE_MINMEM, None)
      res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,
                                                     new_disk_template)

      if (res_max or res_min):
        # FIXME: Improve error message by including information about whether
        # the upper or lower limit of the parameter fails the ipolicy.
        msg = ("Instance allocation to group %s (%s) violates policy: %s" %
               (group_info, group_info.name,
                utils.CommaJoin(set(res_max + res_min))))
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

  def _ConvertPlainToDrbd(self, feedback_fn):
    """Converts an instance from plain to drbd.

    """
    feedback_fn("Converting template to drbd")
    pnode_uuid = self.instance.primary_node
    snode_uuid = self.op.remote_node_uuid

    assert self.instance.disk_template == constants.DT_PLAIN

    # create a fake disk info for _GenerateDiskTemplate
    disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
                  constants.IDISK_VG: d.logical_id[0],
                  constants.IDISK_NAME: d.name}
                 for d in self.instance.disks]
    new_disks = GenerateDiskTemplate(self, self.op.disk_template,
                                     self.instance.uuid, pnode_uuid,
                                     [snode_uuid], disk_info, None, None, 0,
                                     feedback_fn, self.diskparams)
    anno_disks = rpc.AnnotateDiskParams(constants.DT_DRBD8, new_disks,
                                        self.diskparams)
    p_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, pnode_uuid)
    s_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, snode_uuid)
    info = GetInstanceInfoText(self.instance)
    feedback_fn("Creating additional volumes...")
    # first, create the missing data and meta devices
    for disk in anno_disks:
      # unfortunately this is... not too nice
      CreateSingleBlockDev(self, pnode_uuid, self.instance, disk.children[1],
                           info, True, p_excl_stor)
      for child in disk.children:
        CreateSingleBlockDev(self, snode_uuid, self.instance, child, info, True,
                             s_excl_stor)
    # at this stage, all new LVs have been created, we can rename the
    # old ones
    feedback_fn("Renaming original volumes...")
    rename_list = [(o, n.children[0].logical_id)
                   for (o, n) in zip(self.instance.disks, new_disks)]
    result = self.rpc.call_blockdev_rename(pnode_uuid, rename_list)
    result.Raise("Failed to rename original LVs")

    feedback_fn("Initializing DRBD devices...")
    # all child devices are in place, we can now create the DRBD devices
    try:
      for disk in anno_disks:
        for (node_uuid, excl_stor) in [(pnode_uuid, p_excl_stor),
                                       (snode_uuid, s_excl_stor)]:
          f_create = node_uuid == pnode_uuid
          CreateSingleBlockDev(self, node_uuid, self.instance, disk, info,
                               f_create, excl_stor)
    except errors.GenericError, e:
      feedback_fn("Initializing of DRBD devices failed;"
                  " renaming back original volumes...")
      for disk in new_disks:
        self.cfg.SetDiskID(disk, pnode_uuid)
      rename_back_list = [(n.children[0], o.logical_id)
                          for (n, o) in zip(new_disks, self.instance.disks)]
      result = self.rpc.call_blockdev_rename(pnode_uuid, rename_back_list)
      result.Raise("Failed to rename LVs back after error %s" % str(e))
      raise

    # at this point, the instance has been modified
    self.instance.disk_template = constants.DT_DRBD8
    self.instance.disks = new_disks
    self.cfg.Update(self.instance, feedback_fn)

    # Release node locks while waiting for sync
    ReleaseLocks(self, locking.LEVEL_NODE)

    # disks are created, waiting for sync
    disk_abort = not WaitForSync(self, self.instance,
                                 oneshot=not self.op.wait_for_sync)
    if disk_abort:
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance, please cleanup manually")

    # Node resource locks will be released by caller

  def _ConvertDrbdToPlain(self, feedback_fn):
    """Converts an instance from drbd to plain.

    """
    assert len(self.instance.secondary_nodes) == 1
    assert self.instance.disk_template == constants.DT_DRBD8

    pnode_uuid = self.instance.primary_node
    snode_uuid = self.instance.secondary_nodes[0]
    feedback_fn("Converting template to plain")

    old_disks = AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
    new_disks = [d.children[0] for d in self.instance.disks]

    # copy over size, mode and name
    for parent, child in zip(old_disks, new_disks):
      child.size = parent.size
      child.mode = parent.mode
      child.name = parent.name

    # this is a DRBD disk, return its port to the pool
    # NOTE: this must be done right before the call to cfg.Update!
    for disk in old_disks:
      tcp_port = disk.logical_id[2]
      self.cfg.AddTcpUdpPort(tcp_port)

    # update instance structure
    self.instance.disks = new_disks
    self.instance.disk_template = constants.DT_PLAIN
    _UpdateIvNames(0, self.instance.disks)
    self.cfg.Update(self.instance, feedback_fn)

    # Release locks in case removing disks takes a while
    ReleaseLocks(self, locking.LEVEL_NODE)

    feedback_fn("Removing volumes on the secondary node...")
    for disk in old_disks:
      self.cfg.SetDiskID(disk, snode_uuid)
      msg = self.rpc.call_blockdev_remove(snode_uuid, disk).fail_msg
      if msg:
        self.LogWarning("Could not remove block device %s on node %s,"
                        " continuing anyway: %s", disk.iv_name,
                        self.cfg.GetNodeName(snode_uuid), msg)

    feedback_fn("Removing unneeded volumes on the primary node...")
    for idx, disk in enumerate(old_disks):
      meta = disk.children[1]
      self.cfg.SetDiskID(meta, pnode_uuid)
      msg = self.rpc.call_blockdev_remove(pnode_uuid, meta).fail_msg
      if msg:
        self.LogWarning("Could not remove metadata for disk %d on node %s,"
                        " continuing anyway: %s", idx,
                        self.cfg.GetNodeName(pnode_uuid), msg)

  def _CreateNewDisk(self, idx, params, _):
    """Creates a new disk.

    """
    # add a new disk
    if self.instance.disk_template in constants.DTS_FILEBASED:
      (file_driver, file_path) = self.instance.disks[0].logical_id
      file_path = os.path.dirname(file_path)
    else:
      file_driver = file_path = None

    disk = \
      GenerateDiskTemplate(self, self.instance.disk_template,
                           self.instance.uuid, self.instance.primary_node,
                           self.instance.secondary_nodes, [params], file_path,
                           file_driver, idx, self.Log, self.diskparams)[0]

    new_disks = CreateDisks(self, self.instance, disks=[disk])

    if self.cluster.prealloc_wipe_disks:
      # Wipe new disk
      WipeOrCleanupDisks(self, self.instance,
                         disks=[(idx, disk, 0)],
                         cleanup=new_disks)

    return (disk, [
      ("disk/%d" % idx, "add:size=%s,mode=%s" % (disk.size, disk.mode)),
      ])

  @staticmethod
  def _ModifyDisk(idx, disk, params, _):
    """Modifies a disk.

    """
    changes = []
    mode = params.get(constants.IDISK_MODE, None)
    if mode:
      disk.mode = mode
      changes.append(("disk.mode/%d" % idx, disk.mode))

    name = params.get(constants.IDISK_NAME, None)
    disk.name = name
    changes.append(("disk.name/%d" % idx, disk.name))

    return changes

  def _RemoveDisk(self, idx, root, _):
    """Removes a disk.

    """
    (anno_disk,) = AnnotateDiskParams(self.instance, [root], self.cfg)
    for node_uuid, disk in anno_disk.ComputeNodeTree(
                             self.instance.primary_node):
      self.cfg.SetDiskID(disk, node_uuid)
      msg = self.rpc.call_blockdev_remove(node_uuid, disk).fail_msg
      if msg:
        self.LogWarning("Could not remove disk/%d on node '%s': %s,"
                        " continuing anyway", idx,
                        self.cfg.GetNodeName(node_uuid), msg)

    # if this is a DRBD disk, return its port to the pool
    if root.dev_type in constants.LDS_DRBD:
      self.cfg.AddTcpUdpPort(root.logical_id[2])

  def _CreateNewNic(self, idx, params, private):
    """Creates data structure for a new network interface.

    """
    mac = params[constants.INIC_MAC]
    ip = params.get(constants.INIC_IP, None)
    net = params.get(constants.INIC_NETWORK, None)
    name = params.get(constants.INIC_NAME, None)
    net_uuid = self.cfg.LookupNetwork(net)
    #TODO: not private.filled?? can a nic have no nicparams??
    nicparams = private.filled
    nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name,
                       nicparams=nicparams)
    nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())

    return (nobj, [
      ("nic.%d" % idx,
       "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
       (mac, ip, private.filled[constants.NIC_MODE],
       private.filled[constants.NIC_LINK],
       net)),
      ])

  def _ApplyNicMods(self, idx, nic, params, private):
    """Modifies a network interface.

    """
    changes = []

    for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:
      if key in params:
        changes.append(("nic.%s/%d" % (key, idx), params[key]))
        setattr(nic, key, params[key])

    new_net = params.get(constants.INIC_NETWORK, nic.network)
    new_net_uuid = self.cfg.LookupNetwork(new_net)
    if new_net_uuid != nic.network:
      changes.append(("nic.network/%d" % idx, new_net))
      nic.network = new_net_uuid

    if private.filled:
      nic.nicparams = private.filled

      for (key, val) in nic.nicparams.items():
        changes.append(("nic.%s/%d" % (key, idx), val))

    return changes

  def Exec(self, feedback_fn):
    """Modifies an instance.

    All parameters take effect only at the next restart of the instance.

    """
    # Process here the warnings from CheckPrereq, as we don't have a
    # feedback_fn there.
    # TODO: Replace with self.LogWarning
    for warn in self.warn:
      feedback_fn("WARNING: %s" % warn)

    assert ((self.op.disk_template is None) ^
            bool(self.owned_locks(locking.LEVEL_NODE_RES))), \
      "Not owning any node resource locks"

    result = []

    # New primary node
    if self.op.pnode_uuid:
      self.instance.primary_node = self.op.pnode_uuid

    # runtime memory
    if self.op.runtime_mem:
      rpcres = self.rpc.call_instance_balloon_memory(self.instance.primary_node,
                                                     self.instance,
                                                     self.op.runtime_mem)
      rpcres.Raise("Cannot modify instance runtime memory")
      result.append(("runtime_memory", self.op.runtime_mem))

    # Apply disk changes
    _ApplyContainerMods("disk", self.instance.disks, result, self.diskmod,
                        self._CreateNewDisk, self._ModifyDisk,
                        self._RemoveDisk)
    _UpdateIvNames(0, self.instance.disks)

    if self.op.disk_template:
      if __debug__:
        check_nodes = set(self.instance.all_nodes)
        if self.op.remote_node_uuid:
          check_nodes.add(self.op.remote_node_uuid)
        for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
          owned = self.owned_locks(level)
          assert not (check_nodes - owned), \
            ("Not owning the correct locks, owning %r, expected at least %r" %
             (owned, check_nodes))

      r_shut = ShutdownInstanceDisks(self, self.instance)
      if not r_shut:
        raise errors.OpExecError("Cannot shutdown instance disks, unable to"
                                 " proceed with disk template conversion")
      mode = (self.instance.disk_template, self.op.disk_template)
      try:
        self._DISK_CONVERSIONS[mode](self, feedback_fn)
      except:
        self.cfg.ReleaseDRBDMinors(self.instance.uuid)
        raise
      result.append(("disk_template", self.op.disk_template))

      assert self.instance.disk_template == self.op.disk_template, \
        ("Expected disk template '%s', found '%s'" %
         (self.op.disk_template, self.instance.disk_template))

    # Release node and resource locks if there are any (they might already have
    # been released during disk conversion)
    ReleaseLocks(self, locking.LEVEL_NODE)
    ReleaseLocks(self, locking.LEVEL_NODE_RES)

    # Apply NIC changes
    if self._new_nics is not None:
      self.instance.nics = self._new_nics
      result.extend(self._nic_chgdesc)

    # hvparams changes
    if self.op.hvparams:
      self.instance.hvparams = self.hv_inst
      for key, val in self.op.hvparams.iteritems():
        result.append(("hv/%s" % key, val))

    # beparams changes
    if self.op.beparams:
      self.instance.beparams = self.be_inst
      for key, val in self.op.beparams.iteritems():
        result.append(("be/%s" % key, val))

    # OS change
    if self.op.os_name:
      self.instance.os = self.op.os_name

    # osparams changes
    if self.op.osparams:
      self.instance.osparams = self.os_inst
      for key, val in self.op.osparams.iteritems():
        result.append(("os/%s" % key, val))

    if self.op.offline is None:
      # Ignore
      pass
    elif self.op.offline:
      # Mark instance as offline
      self.cfg.MarkInstanceOffline(self.instance.uuid)
      result.append(("admin_state", constants.ADMINST_OFFLINE))
    else:
      # Mark instance as online, but stopped
      self.cfg.MarkInstanceDown(self.instance.uuid)
      result.append(("admin_state", constants.ADMINST_DOWN))

    self.cfg.Update(self.instance, feedback_fn, self.proc.GetECId())

    assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
                self.owned_locks(locking.LEVEL_NODE)), \
      "All node locks should have been released by now"

    return result
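
  # Mapping of the supported (current template, target template) pairs to
  # the conversion methods above; Exec looks up the requested conversion
  # here after the prerequisite checks have passed.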
  _DISK_CONVERSIONS = {
    (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
    (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
    }


class LUInstanceChangeGroup(LogicalUnit):
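  """Change the group of an instance, by relocating it via the iallocator.

  """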
  HPATH = "instance-change-group"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self.share_locks = ShareAll()

    self.needed_locks = {
      locking.LEVEL_NODEGROUP: [],
      locking.LEVEL_NODE: [],
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
      }

    self._ExpandAndLockInstance()

    if self.op.target_groups:
      self.req_target_uuids = map(self.cfg.LookupNodeGroup,
                                  self.op.target_groups)
    else:
      self.req_target_uuids = None

    self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]

      if self.req_target_uuids:
        lock_groups = set(self.req_target_uuids)

        # Lock all groups used by instance optimistically; this requires going
        # via the node before it's locked, requiring verification later on
        instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
        lock_groups.update(instance_groups)
      else:
        # No target groups, need to lock all of them
        lock_groups = locking.ALL_SET

      self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups

    elif level == locking.LEVEL_NODE:
      if self.req_target_uuids:
        # Lock all nodes used by instances
        self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
        self._LockInstancesNodes()

        # Lock all nodes in all potential target groups
        lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
                       self.cfg.GetInstanceNodeGroups(self.op.instance_uuid))
        member_nodes = [node_uuid
                        for group in lock_groups
                        for node_uuid in self.cfg.GetNodeGroup(group).members]
        self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
      else:
        # Lock all nodes as all groups are potential targets
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    owned_instance_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))

    assert (self.req_target_uuids is None or
            owned_groups.issuperset(self.req_target_uuids))
    assert owned_instance_names == set([self.op.instance_name])

    # Get instance information
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)

    # Check if node groups for locked instance are still correct
    assert owned_nodes.issuperset(self.instance.all_nodes), \
      ("Instance %s's nodes changed while we kept the lock" %
       self.op.instance_name)

    inst_groups = CheckInstanceNodeGroups(self.cfg, self.op.instance_uuid,
                                          owned_groups)

    if self.req_target_uuids:
      # User requested specific target groups
      self.target_uuids = frozenset(self.req_target_uuids)
    else:
      # All groups except those used by the instance are potential targets
      self.target_uuids = owned_groups - inst_groups

    conflicting_groups = self.target_uuids & inst_groups
    if conflicting_groups:
      raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
                                 " used by the instance '%s'" %
                                 (utils.CommaJoin(conflicting_groups),
                                  self.op.instance_name),
                                 errors.ECODE_INVAL)

    if not self.target_uuids:
      raise errors.OpPrereqError("There are no possible target groups",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    assert self.target_uuids

    env = {
      "TARGET_GROUPS": " ".join(self.target_uuids),
      }

    env.update(BuildInstanceHookEnvByObject(self, self.instance))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()
    return ([mn], [mn])

  def Exec(self, feedback_fn):
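    """Computes the group change solution via the iallocator and returns
    the resulting jobs.

    """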
    instances = list(self.owned_locks(locking.LEVEL_INSTANCE))

    assert instances == [self.op.instance_name], "Instance not locked"

    req = iallocator.IAReqGroupChange(instances=instances,
                                      target_groups=list(self.target_uuids))
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute solution for changing group of"
                                 " instance '%s' using iallocator '%s': %s" %
                                 (self.op.instance_name, self.op.iallocator,
                                  ial.info), errors.ECODE_NORES)

    jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False)

    self.LogInfo("Iallocator returned %s job(s) for changing group of"
                 " instance '%s'", len(jobs), self.op.instance_name)

    return ResultWithJobs(jobs)