lib/cmdlib/instance.py @ b7a990e3

#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Logical units dealing with instances."""

import OpenSSL
import copy
import logging
import os

from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import ht
from ganeti import hypervisor
from ganeti import locking
from ganeti.masterd import iallocator
from ganeti import masterd
from ganeti import netutils
from ganeti import objects
from ganeti import opcodes
from ganeti import pathutils
from ganeti import rpc
from ganeti import utils

from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs

from ganeti.cmdlib.common import INSTANCE_DOWN, \
  INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, CheckNodeOnline, \
  ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
  LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
  IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
  AnnotateDiskParams, GetUpdatedParams, ExpandInstanceUuidAndName, \
  ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeUuidAndName, \
  CheckDiskTemplateEnabled
from ganeti.cmdlib.instance_storage import CreateDisks, \
  CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, WaitForSync, \
  IsExclusiveStorageEnabledNodeUuid, CreateSingleBlockDev, ComputeDisks, \
  CheckRADOSFreeSpace, ComputeDiskSizePerVG, GenerateDiskTemplate, \
  StartInstanceDisks, ShutdownInstanceDisks, AssembleInstanceDisks, \
  CheckSpindlesExclusiveStorage
from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
  GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
  NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
  ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
  GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
  CheckInstanceBridgesExist, CheckNicsBridgesExist, CheckNodeHasOS

import ganeti.masterd.instance


#: Type description for changes as returned by L{_ApplyContainerMods}'s
#: callbacks
_TApplyContModsCbChanges = \
  ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
    ht.TNonEmptyString,
    ht.TAny,
    ])))
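# For illustration only: a value matching the above type is an optional list
# of (non-empty name, arbitrary value) pairs, e.g.
#   [("nic.0/mac", "aa:00:00:35:a2:01"), ("disk/1", 1024)]
# (the example names and values are made up, not taken from actual callbacks).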


def _CheckHostnameSane(lu, name):
  """Ensures that a given hostname resolves to a 'sane' name.

  The given name is required to be a prefix of the resolved hostname,
  to prevent accidental mismatches.

  @param lu: the logical unit on behalf of which we're checking
  @param name: the name we should resolve and check
  @return: the resolved hostname object

  """
  hostname = netutils.GetHostname(name=name)
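  # Illustrative example (hypothetical names): if "inst1" resolves to
  # "inst1.example.com", the prefix check below passes; a resolution to
  # "mail.example.com" raises OpPrereqError.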
  if hostname.name != name:
    lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name)
  if not utils.MatchNameComponent(name, [hostname.name]):
    raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
                                " same as given hostname '%s'") %
                               (hostname.name, name), errors.ECODE_INVAL)
  return hostname


def _CheckOpportunisticLocking(op):
  """Generate error if opportunistic locking is not possible.

  """
  if op.opportunistic_locking and not op.iallocator:
    raise errors.OpPrereqError("Opportunistic locking is only available in"
                               " combination with an instance allocator",
                               errors.ECODE_INVAL)


def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_name_whitelist):
  """Wrapper around IAReqInstanceAlloc.

  @param op: The instance opcode
  @param disks: The computed disks
  @param nics: The computed nics
  @param beparams: The full filled beparams
  @param node_name_whitelist: List of nodes which should appear as online to the
    allocator (unless the node is already marked offline)

  @returns: A filled L{iallocator.IAReqInstanceAlloc}

  """
  spindle_use = beparams[constants.BE_SPINDLE_USE]
  return iallocator.IAReqInstanceAlloc(name=op.instance_name,
                                       disk_template=op.disk_template,
                                       tags=op.tags,
                                       os=op.os_type,
                                       vcpus=beparams[constants.BE_VCPUS],
                                       memory=beparams[constants.BE_MAXMEM],
                                       spindle_use=spindle_use,
                                       disks=disks,
                                       nics=[n.ToDict() for n in nics],
                                       hypervisor=op.hypervisor,
                                       node_whitelist=node_name_whitelist)


def _ComputeFullBeParams(op, cluster):
  """Computes the full beparams.

  @param op: The instance opcode
  @param cluster: The cluster config object

  @return: The fully filled beparams

  """
  default_beparams = cluster.beparams[constants.PP_DEFAULT]
  for param, value in op.beparams.iteritems():
    if value == constants.VALUE_AUTO:
      op.beparams[param] = default_beparams[param]
  objects.UpgradeBeParams(op.beparams)
  utils.ForceDictType(op.beparams, constants.BES_PARAMETER_TYPES)
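  # By now every VALUE_AUTO entry has been replaced by the cluster default
  # (e.g. a hypothetical {"vcpus": "auto"} becomes {"vcpus": 1} if the
  # cluster default is 1); SimpleFillBE below merges in the remaining
  # defaults.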
  return cluster.SimpleFillBE(op.beparams)


def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
  """Computes the nics.

  @param op: The instance opcode
  @param cluster: Cluster configuration object
  @param default_ip: The default ip to assign
  @param cfg: An instance of the configuration object
  @param ec_id: Execution context ID

  @returns: The built up nics

  """
  nics = []
  for nic in op.nics:
    nic_mode_req = nic.get(constants.INIC_MODE, None)
    nic_mode = nic_mode_req
    if nic_mode is None or nic_mode == constants.VALUE_AUTO:
      nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]

    net = nic.get(constants.INIC_NETWORK, None)
    link = nic.get(constants.NIC_LINK, None)
    ip = nic.get(constants.INIC_IP, None)

    if net is None or net.lower() == constants.VALUE_NONE:
      net = None
    else:
      if nic_mode_req is not None or link is not None:
        raise errors.OpPrereqError("If network is given, no mode or link"
                                   " is allowed to be passed",
                                   errors.ECODE_INVAL)

    # ip validity checks
    if ip is None or ip.lower() == constants.VALUE_NONE:
      nic_ip = None
    elif ip.lower() == constants.VALUE_AUTO:
      if not op.name_check:
        raise errors.OpPrereqError("IP address set to auto but name checks"
                                   " have been skipped",
                                   errors.ECODE_INVAL)
      nic_ip = default_ip
    else:
      # We defer pool operations until later, so that the iallocator has
      # filled in the instance's node(s)
      if ip.lower() == constants.NIC_IP_POOL:
        if net is None:
          raise errors.OpPrereqError("if ip=pool, parameter network"
                                     " must be passed too",
                                     errors.ECODE_INVAL)

      elif not netutils.IPAddress.IsValid(ip):
        raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                   errors.ECODE_INVAL)

      nic_ip = ip

    # TODO: check the ip address for uniqueness
    if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
      raise errors.OpPrereqError("Routed nic mode requires an ip address",
                                 errors.ECODE_INVAL)

    # MAC address verification
    mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
    if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
      mac = utils.NormalizeAndValidateMac(mac)

      try:
        # TODO: We need to factor this out
        cfg.ReserveMAC(mac, ec_id)
      except errors.ReservationError:
        raise errors.OpPrereqError("MAC address %s already in use"
                                   " in cluster" % mac,
                                   errors.ECODE_NOTUNIQUE)

    #  Build nic parameters
    nicparams = {}
    if nic_mode_req:
      nicparams[constants.NIC_MODE] = nic_mode
    if link:
      nicparams[constants.NIC_LINK] = link

    check_params = cluster.SimpleFillNIC(nicparams)
    objects.NIC.CheckParameterSyntax(check_params)
    net_uuid = cfg.LookupNetwork(net)
    name = nic.get(constants.INIC_NAME, None)
    if name is not None and name.lower() == constants.VALUE_NONE:
      name = None
    nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name,
                          network=net_uuid, nicparams=nicparams)
    nic_obj.uuid = cfg.GenerateUniqueID(ec_id)
    nics.append(nic_obj)

  return nics


def _CheckForConflictingIp(lu, ip, node_uuid):
  """In case of conflicting IP address raise error.

  @type ip: string
  @param ip: IP address
  @type node_uuid: string
  @param node_uuid: node UUID

  """
  (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node_uuid)
  if conf_net is not None:
    raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
                                " network %s, but the target NIC does not." %
                                (ip, conf_net)),
                               errors.ECODE_STATE)

  return (None, None)


def _ComputeIPolicyInstanceSpecViolation(
  ipolicy, instance_spec, disk_template,
  _compute_fn=ComputeIPolicySpecViolation):
  """Compute if instance specs meet the specs of ipolicy.

  @type ipolicy: dict
  @param ipolicy: The ipolicy to verify against
  @type instance_spec: dict
  @param instance_spec: The instance spec to verify
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{ComputeIPolicySpecViolation}

  """
  mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
  cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
  disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
  disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
  nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
  spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)

  return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
                     disk_sizes, spindle_use, disk_template)


def _CheckOSVariant(os_obj, name):
  """Check whether an OS name conforms to the os variants specification.

  @type os_obj: L{objects.OS}
  @param os_obj: OS object to check
  @type name: string
  @param name: OS name passed by the user, to check for validity

  """
  variant = objects.OS.GetVariant(name)
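  # For example (hypothetical OS names), "debootstrap+default" yields the
  # variant "default", while a plain "debootstrap" yields an empty variant.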
  if not os_obj.supported_variants:
    if variant:
      raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
                                 " passed)" % (os_obj.name, variant),
                                 errors.ECODE_INVAL)
    return
  if not variant:
    raise errors.OpPrereqError("OS name must include a variant",
                               errors.ECODE_INVAL)

  if variant not in os_obj.supported_variants:
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)


class LUInstanceCreate(LogicalUnit):
  """Create an instance.

  """
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def _CheckDiskTemplateValid(self):
    """Checks validity of disk template.

    """
    cluster = self.cfg.GetClusterInfo()
    if self.op.disk_template is None:
      # FIXME: It would be better to take the default disk template from the
      # ipolicy, but for the ipolicy we need the primary node, which we get from
      # the iallocator, which wants the disk template as input. To solve this
      # chicken-and-egg problem, it should be possible to specify just a node
      # group from the iallocator and take the ipolicy from that.
      self.op.disk_template = cluster.enabled_disk_templates[0]
    CheckDiskTemplateEnabled(cluster, self.op.disk_template)

  def _CheckDiskArguments(self):
    """Checks validity of disk-related arguments.

    """
    # check that disk's names are unique and valid
    utils.ValidateDeviceNames("disk", self.op.disks)

    self._CheckDiskTemplateValid()

    # check disks: parameter names and consistent adopt/no-adopt strategy
    has_adopt = has_no_adopt = False
    for disk in self.op.disks:
      if self.op.disk_template != constants.DT_EXT:
        utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
      if constants.IDISK_ADOPT in disk:
        has_adopt = True
      else:
        has_no_adopt = True
    if has_adopt and has_no_adopt:
      raise errors.OpPrereqError("Either all disks are adopted or none is",
                                 errors.ECODE_INVAL)
    if has_adopt:
      if self.op.disk_template not in constants.DTS_MAY_ADOPT:
        raise errors.OpPrereqError("Disk adoption is not supported for the"
                                   " '%s' disk template" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)
      if self.op.iallocator is not None:
        raise errors.OpPrereqError("Disk adoption not allowed with an"
                                   " iallocator script", errors.ECODE_INVAL)
      if self.op.mode == constants.INSTANCE_IMPORT:
        raise errors.OpPrereqError("Disk adoption not allowed for"
                                   " instance import", errors.ECODE_INVAL)
    else:
      if self.op.disk_template in constants.DTS_MUST_ADOPT:
        raise errors.OpPrereqError("Disk template %s requires disk adoption,"
                                   " but no 'adopt' parameter given" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)

    self.adopt_disks = has_adopt

  def CheckArguments(self):
    """Check arguments.

    """
    # do not require name_check to ease forward/backward compatibility
    # for tools
    if self.op.no_install and self.op.start:
      self.LogInfo("No-installation mode selected, disabling startup")
      self.op.start = False
    # validate/normalize the instance name
    self.op.instance_name = \
      netutils.Hostname.GetNormalizedName(self.op.instance_name)

    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("Cannot do IP address check without a name"
                                 " check", errors.ECODE_INVAL)

    # check nics' parameter names
    for nic in self.op.nics:
      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
    # check that NIC's parameter names are unique and valid
    utils.ValidateDeviceNames("NIC", self.op.nics)

    self._CheckDiskArguments()

    # instance name verification
    if self.op.name_check:
      self.hostname = _CheckHostnameSane(self, self.op.instance_name)
      self.op.instance_name = self.hostname.name
      # used in CheckPrereq for ip ping check
      self.check_ip = self.hostname.ip
    else:
      self.check_ip = None

    # file storage checks
    if (self.op.file_driver and
        not self.op.file_driver in constants.FILE_DRIVER):
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                 self.op.file_driver, errors.ECODE_INVAL)

    # set default file_driver if unset and required
    if (not self.op.file_driver and
        self.op.disk_template in [constants.DT_FILE,
                                  constants.DT_SHARED_FILE]):
      self.op.file_driver = constants.FD_DEFAULT

    ### Node/iallocator related checks
    CheckIAllocatorOrNode(self, "iallocator", "pnode")

    if self.op.pnode is not None:
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        if self.op.snode is None:
          raise errors.OpPrereqError("The networked disk templates need"
                                     " a mirror node", errors.ECODE_INVAL)
      elif self.op.snode:
        self.LogWarning("Secondary node will be ignored on non-mirrored disk"
                        " template")
        self.op.snode = None

    _CheckOpportunisticLocking(self.op)

    self._cds = GetClusterDomainSecret()

    if self.op.mode == constants.INSTANCE_IMPORT:
      # On import force_variant must be True, because if we forced it at
      # initial install, our only chance when importing it back is that it
      # works again!
      self.op.force_variant = True

      if self.op.no_install:
        self.LogInfo("No-installation mode has no effect during import")

    elif self.op.mode == constants.INSTANCE_CREATE:
      if self.op.os_type is None:
        raise errors.OpPrereqError("No guest OS specified",
                                   errors.ECODE_INVAL)
      if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
        raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
                                   " installation" % self.op.os_type,
                                   errors.ECODE_STATE)
      if self.op.disk_template is None:
        raise errors.OpPrereqError("No disk template specified",
                                   errors.ECODE_INVAL)

    elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
      # Check handshake to ensure both clusters have the same domain secret
      src_handshake = self.op.source_handshake
      if not src_handshake:
        raise errors.OpPrereqError("Missing source handshake",
                                   errors.ECODE_INVAL)

      errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
                                                           src_handshake)
      if errmsg:
        raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
                                   errors.ECODE_INVAL)

      # Load and check source CA
      self.source_x509_ca_pem = self.op.source_x509_ca
      if not self.source_x509_ca_pem:
        raise errors.OpPrereqError("Missing source X509 CA",
                                   errors.ECODE_INVAL)

      try:
        (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
                                                    self._cds)
      except OpenSSL.crypto.Error, err:
        raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
                                   (err, ), errors.ECODE_INVAL)

      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
      if errcode is not None:
        raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
                                   errors.ECODE_INVAL)

      self.source_x509_ca = cert

      src_instance_name = self.op.source_instance_name
      if not src_instance_name:
        raise errors.OpPrereqError("Missing source instance name",
                                   errors.ECODE_INVAL)

      self.source_instance_name = \
        netutils.GetHostname(name=src_instance_name).name

    else:
      raise errors.OpPrereqError("Invalid instance creation mode %r" %
                                 self.op.mode, errors.ECODE_INVAL)

  def ExpandNames(self):
    """ExpandNames for CreateInstance.

    Figure out the right locks for instance creation.

    """
    self.needed_locks = {}

    # this is just a preventive check, but someone might still add this
    # instance in the meantime, and creation will fail at lock-add time
    if self.op.instance_name in\
      [inst.name for inst in self.cfg.GetAllInstancesInfo().values()]:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 self.op.instance_name, errors.ECODE_EXISTS)

    self.add_locks[locking.LEVEL_INSTANCE] = self.op.instance_name

    if self.op.iallocator:
      # TODO: Find a solution to not lock all nodes in the cluster, e.g. by
      # specifying a group on instance creation and then selecting nodes from
      # that group
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
      self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET

      if self.op.opportunistic_locking:
        self.opportunistic_locks[locking.LEVEL_NODE] = True
    else:
      (self.op.pnode_uuid, self.op.pnode) = \
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
      nodelist = [self.op.pnode_uuid]
      if self.op.snode is not None:
        (self.op.snode_uuid, self.op.snode) = \
          ExpandNodeUuidAndName(self.cfg, self.op.snode_uuid, self.op.snode)
        nodelist.append(self.op.snode_uuid)
      self.needed_locks[locking.LEVEL_NODE] = nodelist

    # in case of import lock the source node too
    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = self.op.src_node
      src_path = self.op.src_path

      if src_path is None:
        self.op.src_path = src_path = self.op.instance_name

      if src_node is None:
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
        self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
        self.op.src_node = None
        if os.path.isabs(src_path):
          raise errors.OpPrereqError("Importing an instance from a path"
                                     " requires a source node option",
                                     errors.ECODE_INVAL)
      else:
        (self.op.src_node_uuid, self.op.src_node) = (_, src_node) = \
          ExpandNodeUuidAndName(self.cfg, self.op.src_node_uuid, src_node)
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
          self.needed_locks[locking.LEVEL_NODE].append(self.op.src_node_uuid)
        if not os.path.isabs(src_path):
          self.op.src_path = \
            utils.PathJoin(pathutils.EXPORT_DIR, src_path)

    self.needed_locks[locking.LEVEL_NODE_RES] = \
      CopyLockList(self.needed_locks[locking.LEVEL_NODE])

    # Optimistically acquire shared group locks (we're reading the
    # configuration).  We can't just call GetInstanceNodeGroups, because the
    # instance doesn't exist yet. Therefore we lock all node groups of all
    # nodes we have.
    if self.needed_locks[locking.LEVEL_NODE] == locking.ALL_SET:
      # In the case we lock all nodes for opportunistic allocation, we have no
      # choice but to lock all groups, because they're allocated before nodes.
      # This is sad, but true. At least we release all those we don't need in
      # CheckPrereq later.
      self.needed_locks[locking.LEVEL_NODEGROUP] = locking.ALL_SET
    else:
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
        list(self.cfg.GetNodeGroupsFromNodes(
          self.needed_locks[locking.LEVEL_NODE]))
    self.share_locks[locking.LEVEL_NODEGROUP] = 1

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE_RES and \
      self.opportunistic_locks[locking.LEVEL_NODE]:
      # Even when using opportunistic locking, we require the same set of
      # NODE_RES locks as we got NODE locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        self.owned_locks(locking.LEVEL_NODE)

  def _RunAllocator(self):
    """Run the allocator based on input opcode.

    """
    if self.op.opportunistic_locking:
      # Only consider nodes for which a lock is held
      node_name_whitelist = self.cfg.GetNodeNames(
        self.owned_locks(locking.LEVEL_NODE))
    else:
      node_name_whitelist = None

    #TODO Export network to iallocator so that it chooses a pnode
    #     in a nodegroup that has the desired network connected to
    req = _CreateInstanceAllocRequest(self.op, self.disks,
                                      self.nics, self.be_full,
                                      node_name_whitelist)
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      # When opportunistic locks are used only a temporary failure is generated
      if self.op.opportunistic_locking:
        ecode = errors.ECODE_TEMP_NORES
      else:
        ecode = errors.ECODE_NORES

      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" %
                                 (self.op.iallocator, ial.info),
                                 ecode)

    (self.op.pnode_uuid, self.op.pnode) = \
      ExpandNodeUuidAndName(self.cfg, None, ial.result[0])
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                 self.op.instance_name, self.op.iallocator,
                 utils.CommaJoin(ial.result))

    assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator"

    if req.RequiredNodes() == 2:
      (self.op.snode_uuid, self.op.snode) = \
        ExpandNodeUuidAndName(self.cfg, None, ial.result[1])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "ADD_MODE": self.op.mode,
      }
    if self.op.mode == constants.INSTANCE_IMPORT:
      env["SRC_NODE"] = self.op.src_node
      env["SRC_PATH"] = self.op.src_path
      env["SRC_IMAGES"] = self.src_images

    env.update(BuildInstanceHookEnv(
      name=self.op.instance_name,
      primary_node_name=self.op.pnode,
      secondary_node_names=self.cfg.GetNodeNames(self.secondaries),
      status=self.op.start,
      os_type=self.op.os_type,
      minmem=self.be_full[constants.BE_MINMEM],
      maxmem=self.be_full[constants.BE_MAXMEM],
      vcpus=self.be_full[constants.BE_VCPUS],
      nics=NICListToTuple(self, self.nics),
      disk_template=self.op.disk_template,
      disks=[(d[constants.IDISK_NAME], d.get("uuid", ""),
              d[constants.IDISK_SIZE], d[constants.IDISK_MODE])
             for d in self.disks],
      bep=self.be_full,
      hvp=self.hv_full,
      hypervisor_name=self.op.hypervisor,
      tags=self.op.tags,
      ))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode(), self.op.pnode_uuid] + self.secondaries
    return nl, nl

  def _ReadExportInfo(self):
    """Reads the export information from disk.

    It will override the opcode source node and path with the actual
    information, if these two were not specified before.

    @return: the export information

    """
    assert self.op.mode == constants.INSTANCE_IMPORT

    if self.op.src_node_uuid is None:
      locked_nodes = self.owned_locks(locking.LEVEL_NODE)
      exp_list = self.rpc.call_export_list(locked_nodes)
      found = False
      for node_uuid in exp_list:
        if exp_list[node_uuid].fail_msg:
          continue
        if self.op.src_path in exp_list[node_uuid].payload:
          found = True
          self.op.src_node = self.cfg.GetNodeInfo(node_uuid).name
          self.op.src_node_uuid = node_uuid
          self.op.src_path = utils.PathJoin(pathutils.EXPORT_DIR,
                                            self.op.src_path)
          break
      if not found:
        raise errors.OpPrereqError("No export found for relative path %s" %
                                   self.op.src_path, errors.ECODE_INVAL)

    CheckNodeOnline(self, self.op.src_node_uuid)
    result = self.rpc.call_export_info(self.op.src_node_uuid, self.op.src_path)
    result.Raise("No export or invalid export found in dir %s" %
                 self.op.src_path)

    export_info = objects.SerializableConfigParser.Loads(str(result.payload))
    if not export_info.has_section(constants.INISECT_EXP):
      raise errors.ProgrammerError("Corrupted export config",
                                   errors.ECODE_ENVIRON)

    ei_version = export_info.get(constants.INISECT_EXP, "version")
    if int(ei_version) != constants.EXPORT_VERSION:
      raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                 (ei_version, constants.EXPORT_VERSION),
                                 errors.ECODE_ENVIRON)
    return export_info

  def _ReadExportParams(self, einfo):
    """Use export parameters as defaults.

    In case the opcode doesn't specify (as in override) some instance
    parameters, then try to use them from the export information, if
    that declares them.

    """
    self.op.os_type = einfo.get(constants.INISECT_EXP, "os")

    if not self.op.disks:
      disks = []
      # TODO: import the disk iv_name too
      for idx in range(constants.MAX_DISKS):
        if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
          disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
          disks.append({constants.IDISK_SIZE: disk_sz})
      self.op.disks = disks
      if not disks and self.op.disk_template != constants.DT_DISKLESS:
        raise errors.OpPrereqError("No disk info specified and the export"
                                   " is missing the disk information",
                                   errors.ECODE_INVAL)

    if not self.op.nics:
      nics = []
      for idx in range(constants.MAX_NICS):
        if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
          ndict = {}
          for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
            nic_param_name = "nic%d_%s" % (idx, name)
            if einfo.has_option(constants.INISECT_INS, nic_param_name):
              v = einfo.get(constants.INISECT_INS, nic_param_name)
              ndict[name] = v
          nics.append(ndict)
        else:
          break
      self.op.nics = nics

    if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
      self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()

    if (self.op.hypervisor is None and
        einfo.has_option(constants.INISECT_INS, "hypervisor")):
      self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")

    if einfo.has_section(constants.INISECT_HYP):
      # use the export parameters but do not override the ones
      # specified by the user
      for name, value in einfo.items(constants.INISECT_HYP):
        if name not in self.op.hvparams:
          self.op.hvparams[name] = value

    if einfo.has_section(constants.INISECT_BEP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_BEP):
        if name not in self.op.beparams:
          self.op.beparams[name] = value
        # Compatibility for the old "memory" be param
        if name == constants.BE_MEMORY:
          if constants.BE_MAXMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MAXMEM] = value
          if constants.BE_MINMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MINMEM] = value
    else:
      # try to read the parameters old style, from the main section
      for name in constants.BES_PARAMETERS:
        if (name not in self.op.beparams and
            einfo.has_option(constants.INISECT_INS, name)):
          self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)

    if einfo.has_section(constants.INISECT_OSP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_OSP):
        if name not in self.op.osparams:
          self.op.osparams[name] = value

  def _RevertToDefaults(self, cluster):
    """Revert the instance parameters to the default values.

    """
    # hvparams
    hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
    for name in self.op.hvparams.keys():
      if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
        del self.op.hvparams[name]
    # beparams
    be_defs = cluster.SimpleFillBE({})
    for name in self.op.beparams.keys():
      if name in be_defs and be_defs[name] == self.op.beparams[name]:
        del self.op.beparams[name]
    # nic params
    nic_defs = cluster.SimpleFillNIC({})
    for nic in self.op.nics:
      for name in constants.NICS_PARAMETERS:
        if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
          del nic[name]
    # osparams
    os_defs = cluster.SimpleFillOS(self.op.os_type, {})
    for name in self.op.osparams.keys():
      if name in os_defs and os_defs[name] == self.op.osparams[name]:
        del self.op.osparams[name]

  def _CalculateFileStorageDir(self):
    """Calculate final instance file storage dir.

    """
    # file storage dir calculation/check
    self.instance_file_storage_dir = None
    if self.op.disk_template in constants.DTS_FILEBASED:
      # build the full file storage dir path
      joinargs = []

      if self.op.disk_template == constants.DT_SHARED_FILE:
        get_fsd_fn = self.cfg.GetSharedFileStorageDir
      else:
        get_fsd_fn = self.cfg.GetFileStorageDir

      cfg_storagedir = get_fsd_fn()
      if not cfg_storagedir:
        raise errors.OpPrereqError("Cluster file storage dir not defined",
                                   errors.ECODE_STATE)
      joinargs.append(cfg_storagedir)

      if self.op.file_storage_dir is not None:
        joinargs.append(self.op.file_storage_dir)

      joinargs.append(self.op.instance_name)

      # pylint: disable=W0142
      self.instance_file_storage_dir = utils.PathJoin(*joinargs)

  def CheckPrereq(self): # pylint: disable=R0914
    """Check prerequisites.

    """
    # Check that the optimistically acquired groups are correct wrt the
    # acquired nodes
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
    cur_groups = list(self.cfg.GetNodeGroupsFromNodes(owned_nodes))
    if not owned_groups.issuperset(cur_groups):
      raise errors.OpPrereqError("New instance %s's node groups changed since"
                                 " locks were acquired, current groups are"
                                 " '%s', owning groups '%s'; retry the"
                                 " operation" %
                                 (self.op.instance_name,
                                  utils.CommaJoin(cur_groups),
                                  utils.CommaJoin(owned_groups)),
                                 errors.ECODE_STATE)

    self._CalculateFileStorageDir()

    if self.op.mode == constants.INSTANCE_IMPORT:
      export_info = self._ReadExportInfo()
887
      self._ReadExportParams(export_info)
888
      self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
889
    else:
890
      self._old_instance_name = None
891

    
892
    if (not self.cfg.GetVGName() and
893
        self.op.disk_template not in constants.DTS_NOT_LVM):
894
      raise errors.OpPrereqError("Cluster does not support lvm-based"
895
                                 " instances", errors.ECODE_STATE)
896

    
897
    if (self.op.hypervisor is None or
898
        self.op.hypervisor == constants.VALUE_AUTO):
899
      self.op.hypervisor = self.cfg.GetHypervisorType()
900

    
901
    cluster = self.cfg.GetClusterInfo()
902
    enabled_hvs = cluster.enabled_hypervisors
903
    if self.op.hypervisor not in enabled_hvs:
904
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
905
                                 " cluster (%s)" %
906
                                 (self.op.hypervisor, ",".join(enabled_hvs)),
907
                                 errors.ECODE_STATE)
908

    
909
    # Check tag validity
910
    for tag in self.op.tags:
911
      objects.TaggableObject.ValidateTag(tag)
912

    
913
    # check hypervisor parameter syntax (locally)
914
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
915
    filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
916
                                      self.op.hvparams)
917
    hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
918
    hv_type.CheckParameterSyntax(filled_hvp)
919
    self.hv_full = filled_hvp
920
    # check that we don't specify global parameters on an instance
921
    CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
922
                         "instance", "cluster")
923

    
924
    # fill and remember the beparams dict
925
    self.be_full = _ComputeFullBeParams(self.op, cluster)
926

    
927
    # build os parameters
928
    self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)
929

    
930
    # now that hvp/bep are in final format, let's reset to defaults,
931
    # if told to do so
932
    if self.op.identify_defaults:
933
      self._RevertToDefaults(cluster)
934

    
935
    # NIC buildup
936
    self.nics = _ComputeNics(self.op, cluster, self.check_ip, self.cfg,
937
                             self.proc.GetECId())
938

    
939
    # disk checks/pre-build
940
    default_vg = self.cfg.GetVGName()
941
    self.disks = ComputeDisks(self.op, default_vg)
942

    
943
    if self.op.mode == constants.INSTANCE_IMPORT:
944
      disk_images = []
945
      for idx in range(len(self.disks)):
946
        option = "disk%d_dump" % idx
947
        if export_info.has_option(constants.INISECT_INS, option):
948
          # FIXME: are the old os-es, disk sizes, etc. useful?
949
          export_name = export_info.get(constants.INISECT_INS, option)
950
          image = utils.PathJoin(self.op.src_path, export_name)
951
          disk_images.append(image)
952
        else:
953
          disk_images.append(False)
954

    
955
      self.src_images = disk_images
956

    
957
      if self.op.instance_name == self._old_instance_name:
958
        for idx, nic in enumerate(self.nics):
959
          if nic.mac == constants.VALUE_AUTO:
960
            nic_mac_ini = "nic%d_mac" % idx
961
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)
962

    
963
    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT
964

    
965
    # ip ping checks (we use the same ip that was resolved in ExpandNames)
966
    if self.op.ip_check:
967
      if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
968
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
969
                                   (self.check_ip, self.op.instance_name),
970
                                   errors.ECODE_NOTUNIQUE)
971

    
972
    #### mac address generation
973
    # By generating here the mac address both the allocator and the hooks get
974
    # the real final mac address rather than the 'auto' or 'generate' value.
975
    # There is a race condition between the generation and the instance object
976
    # creation, which means that we know the mac is valid now, but we're not
977
    # sure it will be when we actually add the instance. If things go bad
978
    # adding the instance will abort because of a duplicate mac, and the
979
    # creation job will fail.
980
    for nic in self.nics:
981
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
982
        nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId())
983

    
984
    #### allocator run
985

    
986
    if self.op.iallocator is not None:
987
      self._RunAllocator()
988

    
989
    # Release all unneeded node locks
990
    keep_locks = filter(None, [self.op.pnode_uuid, self.op.snode_uuid,
991
                               self.op.src_node_uuid])
992
    ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
993
    ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
994
    ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)
995
    # Release all unneeded group locks
996
    ReleaseLocks(self, locking.LEVEL_NODEGROUP,
997
                 keep=self.cfg.GetNodeGroupsFromNodes(keep_locks))
998

    
999
    assert (self.owned_locks(locking.LEVEL_NODE) ==
1000
            self.owned_locks(locking.LEVEL_NODE_RES)), \
1001
      "Node locks differ from node resource locks"
1002

    
1003
    #### node related checks
1004

    
1005
    # check primary node
1006
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode_uuid)
1007
    assert self.pnode is not None, \
1008
      "Cannot retrieve locked node %s" % self.op.pnode_uuid
1009
    if pnode.offline:
1010
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
1011
                                 pnode.name, errors.ECODE_STATE)
1012
    if pnode.drained:
1013
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
1014
                                 pnode.name, errors.ECODE_STATE)
1015
    if not pnode.vm_capable:
1016
      raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
1017
                                 " '%s'" % pnode.name, errors.ECODE_STATE)
1018

    
1019
    self.secondaries = []
1020

    
1021
    # Fill in any IPs from IP pools. This must happen here, because we need to
1022
    # know the nic's primary node, as specified by the iallocator
1023
    for idx, nic in enumerate(self.nics):
1024
      net_uuid = nic.network
1025
      if net_uuid is not None:
1026
        nobj = self.cfg.GetNetwork(net_uuid)
1027
        netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.uuid)
1028
        if netparams is None:
1029
          raise errors.OpPrereqError("No netparams found for network"
1030
                                     " %s. Propably not connected to"
1031
                                     " node's %s nodegroup" %
1032
                                     (nobj.name, self.pnode.name),
1033
                                     errors.ECODE_INVAL)
1034
        self.LogInfo("NIC/%d inherits netparams %s" %
1035
                     (idx, netparams.values()))
1036
        nic.nicparams = dict(netparams)
1037
        if nic.ip is not None:
1038
          if nic.ip.lower() == constants.NIC_IP_POOL:
1039
            try:
1040
              nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId())
1041
            except errors.ReservationError:
1042
              raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
1043
                                         " from the address pool" % idx,
1044
                                         errors.ECODE_STATE)
1045
            self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name)
1046
          else:
1047
            try:
1048
              self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId())
1049
            except errors.ReservationError:
1050
              raise errors.OpPrereqError("IP address %s already in use"
1051
                                         " or does not belong to network %s" %
1052
                                         (nic.ip, nobj.name),
1053
                                         errors.ECODE_NOTUNIQUE)
1054

    
1055
      # net is None, ip None or given
1056
      elif self.op.conflicts_check:
1057
        _CheckForConflictingIp(self, nic.ip, self.pnode.uuid)
1058

    
1059
    # mirror node verification
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.snode_uuid == pnode.uuid:
        raise errors.OpPrereqError("The secondary node cannot be the"
                                   " primary node", errors.ECODE_INVAL)
      CheckNodeOnline(self, self.op.snode_uuid)
      CheckNodeNotDrained(self, self.op.snode_uuid)
      CheckNodeVmCapable(self, self.op.snode_uuid)
      self.secondaries.append(self.op.snode_uuid)

      snode = self.cfg.GetNodeInfo(self.op.snode_uuid)
      if pnode.group != snode.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    nodes = [pnode]
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      nodes.append(snode)
    has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
    excl_stor = compat.any(map(has_es, nodes))
    if excl_stor and not self.op.disk_template in constants.DTS_EXCL_STORAGE:
      raise errors.OpPrereqError("Disk template %s not supported with"
                                 " exclusive storage" % self.op.disk_template,
                                 errors.ECODE_STATE)
    for disk in self.disks:
      CheckSpindlesExclusiveStorage(disk, excl_stor, True)

    node_uuids = [pnode.uuid] + self.secondaries

    if not self.adopt_disks:
      if self.op.disk_template == constants.DT_RBD:
        # _CheckRADOSFreeSpace() is just a placeholder.
        # Any function that checks prerequisites can be placed here.
        # Check if there is enough space on the RADOS cluster.
        CheckRADOSFreeSpace()
      elif self.op.disk_template == constants.DT_EXT:
        # FIXME: Function that checks prereqs if needed
        pass
      elif self.op.disk_template in utils.GetLvmDiskTemplates():
        # Check lv size requirements, if not adopting
        req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
        CheckNodesFreeDiskPerVG(self, node_uuids, req_sizes)
      else:
        # FIXME: add checks for other, non-adopting, non-lvm disk templates
        pass

    elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
      all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
                                disk[constants.IDISK_ADOPT])
                     for disk in self.disks])
      if len(all_lvs) != len(self.disks):
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
                                   errors.ECODE_INVAL)
      for lv_name in all_lvs:
        try:
          # FIXME: lv_name here is "vg/lv"; need to ensure that other calls
          # to ReserveLV use the same syntax
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("LV named %s used by another instance" %
                                     lv_name, errors.ECODE_NOTUNIQUE)

      vg_names = self.rpc.call_vg_list([pnode.uuid])[pnode.uuid]
      vg_names.Raise("Cannot get VG information from node %s" % pnode.name)

      node_lvs = self.rpc.call_lv_list([pnode.uuid],
                                       vg_names.payload.keys())[pnode.uuid]
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
      node_lvs = node_lvs.payload

      delta = all_lvs.difference(node_lvs.keys())
      if delta:
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
      if online_lvs:
        raise errors.OpPrereqError("Online logical volumes found, cannot"
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
                                   errors.ECODE_STATE)
      # update the size of disk based on what is found
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
                                        dsk[constants.IDISK_ADOPT])][0]))

    elif self.op.disk_template == constants.DT_BLOCK:
      # Normalize and de-duplicate device paths
      all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
                       for disk in self.disks])
      if len(all_disks) != len(self.disks):
        raise errors.OpPrereqError("Duplicate disk names given for adoption",
                                   errors.ECODE_INVAL)
      baddisks = [d for d in all_disks
                  if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
      if baddisks:
        raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
                                   " cannot be adopted" %
                                   (utils.CommaJoin(baddisks),
                                    constants.ADOPTABLE_BLOCKDEV_ROOT),
                                   errors.ECODE_INVAL)

      node_disks = self.rpc.call_bdev_sizes([pnode.uuid],
                                            list(all_disks))[pnode.uuid]
      node_disks.Raise("Cannot get block device information from node %s" %
                       pnode.name)
      node_disks = node_disks.payload
      delta = all_disks.difference(node_disks.keys())
      if delta:
        raise errors.OpPrereqError("Missing block device(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_disks[dsk[constants.IDISK_ADOPT]]))

    # Verify instance specs
    spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
    ispec = {
      constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
      constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
      constants.ISPEC_DISK_COUNT: len(self.disks),
      constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE]
                                  for disk in self.disks],
      constants.ISPEC_NIC_COUNT: len(self.nics),
      constants.ISPEC_SPINDLE_USE: spindle_use,
      }

    group_info = self.cfg.GetNodeGroup(pnode.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
    res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
                                               self.op.disk_template)
    if not self.op.ignore_ipolicy and res:
      msg = ("Instance allocation to group %s (%s) violates policy: %s" %
             (pnode.group, group_info.name, utils.CommaJoin(res)))
      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

    CheckHVParams(self, node_uuids, self.op.hypervisor, self.op.hvparams)

    CheckNodeHasOS(self, pnode.uuid, self.op.os_type, self.op.force_variant)
    # check OS parameters (remotely)
    CheckOSParams(self, True, node_uuids, self.op.os_type, self.os_full)

    CheckNicsBridgesExist(self, self.nics, self.pnode.uuid)

    #TODO: _CheckExtParams (remotely)
    # Check parameters for extstorage

    # memory check on primary node
    #TODO(dynmem): use MINMEM for checking
    if self.op.start:
      hvfull = objects.FillDict(cluster.hvparams.get(self.op.hypervisor, {}),
                                self.op.hvparams)
      CheckNodeFreeMemory(self, self.pnode.uuid,
                          "creating instance %s" % self.op.instance_name,
                          self.be_full[constants.BE_MAXMEM],
                          self.op.hypervisor, hvfull)

    self.dry_run_result = list(node_uuids)

  def Exec(self, feedback_fn):
1222
    """Create and add the instance to the cluster.
1223

1224
    """
1225
    assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
1226
                self.owned_locks(locking.LEVEL_NODE)), \
1227
      "Node locks differ from node resource locks"
1228
    assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)
1229

    
1230
    ht_kind = self.op.hypervisor
1231
    if ht_kind in constants.HTS_REQ_PORT:
1232
      network_port = self.cfg.AllocatePort()
1233
    else:
1234
      network_port = None
1235

    
1236
    instance_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
1237

    
1238
    # This is ugly but we got a chicken-egg problem here
1239
    # We can only take the group disk parameters, as the instance
1240
    # has no disks yet (we are generating them right here).
1241
    nodegroup = self.cfg.GetNodeGroup(self.pnode.group)
1242
    disks = GenerateDiskTemplate(self,
1243
                                 self.op.disk_template,
1244
                                 instance_uuid, self.pnode.uuid,
1245
                                 self.secondaries,
1246
                                 self.disks,
1247
                                 self.instance_file_storage_dir,
1248
                                 self.op.file_driver,
1249
                                 0,
1250
                                 feedback_fn,
1251
                                 self.cfg.GetGroupDiskParams(nodegroup))
1252

    
1253
    iobj = objects.Instance(name=self.op.instance_name,
1254
                            uuid=instance_uuid,
1255
                            os=self.op.os_type,
1256
                            primary_node=self.pnode.uuid,
1257
                            nics=self.nics, disks=disks,
1258
                            disk_template=self.op.disk_template,
1259
                            disks_active=False,
1260
                            admin_state=constants.ADMINST_DOWN,
1261
                            network_port=network_port,
1262
                            beparams=self.op.beparams,
1263
                            hvparams=self.op.hvparams,
1264
                            hypervisor=self.op.hypervisor,
1265
                            osparams=self.op.osparams,
1266
                            )
1267

    
1268
    if self.op.tags:
1269
      for tag in self.op.tags:
1270
        iobj.AddTag(tag)
1271

    
1272
    if self.adopt_disks:
1273
      if self.op.disk_template == constants.DT_PLAIN:
1274
        # rename LVs to the newly-generated names; we need to construct
1275
        # 'fake' LV disks with the old data, plus the new unique_id
1276
        tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
1277
        rename_to = []
1278
        for t_dsk, a_dsk in zip(tmp_disks, self.disks):
1279
          rename_to.append(t_dsk.logical_id)
1280
          t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
1281
          self.cfg.SetDiskID(t_dsk, self.pnode.uuid)
1282
        result = self.rpc.call_blockdev_rename(self.pnode.uuid,
1283
                                               zip(tmp_disks, rename_to))
1284
        result.Raise("Failed to rename adoped LVs")
1285
    else:
1286
      feedback_fn("* creating instance disks...")
1287
      try:
1288
        CreateDisks(self, iobj)
1289
      except errors.OpExecError:
1290
        self.LogWarning("Device creation failed")
1291
        self.cfg.ReleaseDRBDMinors(self.op.instance_name)
1292
        raise
1293

    
1294
    feedback_fn("adding instance %s to cluster config" % self.op.instance_name)
1295

    
1296
    self.cfg.AddInstance(iobj, self.proc.GetECId())
1297

    
1298
    # Declare that we don't want to remove the instance lock anymore, as we've
1299
    # added the instance to the config
1300
    del self.remove_locks[locking.LEVEL_INSTANCE]
1301

    
1302
    if self.op.mode == constants.INSTANCE_IMPORT:
1303
      # Release unused nodes
1304
      ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node_uuid])
1305
    else:
1306
      # Release all nodes
1307
      ReleaseLocks(self, locking.LEVEL_NODE)
1308

    
1309
    disk_abort = False
1310
    if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
1311
      feedback_fn("* wiping instance disks...")
1312
      try:
1313
        WipeDisks(self, iobj)
1314
      except errors.OpExecError, err:
1315
        logging.exception("Wiping disks failed")
1316
        self.LogWarning("Wiping instance disks failed (%s)", err)
1317
        disk_abort = True
1318

    
1319
    if disk_abort:
1320
      # Something is already wrong with the disks, don't do anything else
1321
      pass
1322
    elif self.op.wait_for_sync:
1323
      disk_abort = not WaitForSync(self, iobj)
1324
    elif iobj.disk_template in constants.DTS_INT_MIRROR:
1325
      # make sure the disks are not degraded (still sync-ing is ok)
1326
      feedback_fn("* checking mirrors status")
1327
      disk_abort = not WaitForSync(self, iobj, oneshot=True)
1328
    else:
1329
      disk_abort = False
1330

    
1331
    if disk_abort:
1332
      RemoveDisks(self, iobj)
1333
      self.cfg.RemoveInstance(iobj.uuid)
1334
      # Make sure the instance lock gets removed
1335
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
1336
      raise errors.OpExecError("There are some degraded disks for"
1337
                               " this instance")
1338

    
1339
    # instance disks are now active
1340
    iobj.disks_active = True
1341

    
1342
    # Release all node resource locks
1343
    ReleaseLocks(self, locking.LEVEL_NODE_RES)
1344

    
1345
    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
1346
      # we need to set the disks ID to the primary node, since the
1347
      # preceding code might or might not have done it, depending on
1348
      # disk template and other options
1349
      for disk in iobj.disks:
1350
        self.cfg.SetDiskID(disk, self.pnode.uuid)
1351
      if self.op.mode == constants.INSTANCE_CREATE:
1352
        if not self.op.no_install:
1353
          pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
1354
                        not self.op.wait_for_sync)
1355
          if pause_sync:
1356
            feedback_fn("* pausing disk sync to install instance OS")
1357
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
1358
                                                              (iobj.disks,
1359
                                                               iobj), True)
1360
            for idx, success in enumerate(result.payload):
1361
              if not success:
1362
                logging.warn("pause-sync of instance %s for disk %d failed",
1363
                             self.op.instance_name, idx)
1364

    
1365
          feedback_fn("* running the instance OS create scripts...")
1366
          # FIXME: pass debug option from opcode to backend
1367
          os_add_result = \
1368
            self.rpc.call_instance_os_add(self.pnode.uuid, (iobj, None), False,
1369
                                          self.op.debug_level)
1370
          if pause_sync:
1371
            feedback_fn("* resuming disk sync")
1372
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
1373
                                                              (iobj.disks,
1374
                                                               iobj), False)
1375
            for idx, success in enumerate(result.payload):
1376
              if not success:
1377
                logging.warn("resume-sync of instance %s for disk %d failed",
1378
                             self.op.instance_name, idx)
1379

    
1380
          os_add_result.Raise("Could not add os for instance %s"
1381
                              " on node %s" % (self.op.instance_name,
1382
                                               self.pnode.name))
1383

    
1384
      else:
1385
        if self.op.mode == constants.INSTANCE_IMPORT:
1386
          feedback_fn("* running the instance OS import scripts...")
1387

    
1388
          transfers = []
1389

    
1390
          for idx, image in enumerate(self.src_images):
1391
            if not image:
1392
              continue
1393

    
1394
            # FIXME: pass debug option from opcode to backend
1395
            dt = masterd.instance.DiskTransfer("disk/%s" % idx,
1396
                                               constants.IEIO_FILE, (image, ),
1397
                                               constants.IEIO_SCRIPT,
1398
                                               (iobj.disks[idx], idx),
1399
                                               None)
1400
            transfers.append(dt)
1401

    
1402
          import_result = \
1403
            masterd.instance.TransferInstanceData(self, feedback_fn,
1404
                                                  self.op.src_node_uuid,
1405
                                                  self.pnode.uuid,
1406
                                                  self.pnode.secondary_ip,
1407
                                                  iobj, transfers)
1408
          if not compat.all(import_result):
1409
            self.LogWarning("Some disks for instance %s on node %s were not"
1410
                            " imported successfully" % (self.op.instance_name,
1411
                                                        self.pnode.name))
1412

    
1413
          rename_from = self._old_instance_name
1414

    
1415
        elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
1416
          feedback_fn("* preparing remote import...")
1417
          # The source cluster will stop the instance before attempting to make
1418
          # a connection. In some cases stopping an instance can take a long
1419
          # time, hence the shutdown timeout is added to the connection
1420
          # timeout.
1421
          connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
1422
                             self.op.source_shutdown_timeout)
1423
          timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
1424

    
1425
          assert iobj.primary_node == self.pnode.uuid
1426
          disk_results = \
1427
            masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
1428
                                          self.source_x509_ca,
1429
                                          self._cds, timeouts)
1430
          if not compat.all(disk_results):
1431
            # TODO: Should the instance still be started, even if some disks
1432
            # failed to import (valid for local imports, too)?
1433
            self.LogWarning("Some disks for instance %s on node %s were not"
1434
                            " imported successfully" % (self.op.instance_name,
1435
                                                        self.pnode.name))
1436

    
1437
          rename_from = self.source_instance_name
1438

    
1439
        else:
1440
          # also checked in the prereq part
1441
          raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
1442
                                       % self.op.mode)
1443

    
1444
        # Run rename script on newly imported instance
1445
        assert iobj.name == self.op.instance_name
1446
        feedback_fn("Running rename script for %s" % self.op.instance_name)
1447
        result = self.rpc.call_instance_run_rename(self.pnode.uuid, iobj,
1448
                                                   rename_from,
1449
                                                   self.op.debug_level)
1450
        result.Warn("Failed to run rename script for %s on node %s" %
1451
                    (self.op.instance_name, self.pnode.name), self.LogWarning)
1452

    
1453
    assert not self.owned_locks(locking.LEVEL_NODE_RES)
1454

    
1455
    if self.op.start:
1456
      iobj.admin_state = constants.ADMINST_UP
1457
      self.cfg.Update(iobj, feedback_fn)
1458
      logging.info("Starting instance %s on node %s", self.op.instance_name,
1459
                   self.pnode.name)
1460
      feedback_fn("* starting instance...")
1461
      result = self.rpc.call_instance_start(self.pnode.uuid, (iobj, None, None),
1462
                                            False, self.op.reason)
1463
      result.Raise("Could not start instance")
1464

    
1465
    return self.cfg.GetNodeNames(list(iobj.all_nodes))
1466

    
1467

    
1468
class LUInstanceRename(LogicalUnit):
1469
  """Rename an instance.
1470

1471
  """
1472
  HPATH = "instance-rename"
1473
  HTYPE = constants.HTYPE_INSTANCE
1474

    
1475
  def CheckArguments(self):
1476
    """Check arguments.
1477

1478
    """
1479
    if self.op.ip_check and not self.op.name_check:
1480
      # TODO: make the ip check more flexible and not depend on the name check
1481
      raise errors.OpPrereqError("IP address check requires a name check",
1482
                                 errors.ECODE_INVAL)
1483

    
1484
  def BuildHooksEnv(self):
1485
    """Build hooks env.
1486

1487
    This runs on master, primary and secondary nodes of the instance.
1488

1489
    """
1490
    env = BuildInstanceHookEnvByObject(self, self.instance)
1491
    env["INSTANCE_NEW_NAME"] = self.op.new_name
1492
    return env
1493

    
1494
  def BuildHooksNodes(self):
1495
    """Build hooks nodes.
1496

1497
    """
1498
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
1499
    return (nl, nl)
1500

    
1501
  def CheckPrereq(self):
1502
    """Check prerequisites.
1503

1504
    This checks that the instance is in the cluster and is not running.
1505

1506
    """
1507
    (self.op.instance_uuid, self.op.instance_name) = \
1508
      ExpandInstanceUuidAndName(self.cfg, self.op.instance_uuid,
1509
                                self.op.instance_name)
1510
    instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1511
    assert instance is not None
1512

    
1513
    # It should actually not happen that an instance is running with a disabled
1514
    # disk template, but in case it does, the renaming of file-based instances
1515
    # will fail horribly. Thus, we check this beforehand.
1516
    if (instance.disk_template in constants.DTS_FILEBASED and
1517
        self.op.new_name != instance.name):
1518
      CheckDiskTemplateEnabled(self.cfg.GetClusterInfo(),
1519
                               instance.disk_template)
1520

    
1521
    CheckNodeOnline(self, instance.primary_node)
1522
    CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
1523
                       msg="cannot rename")
1524
    self.instance = instance
1525

    
1526
    new_name = self.op.new_name
1527
    if self.op.name_check:
1528
      hostname = _CheckHostnameSane(self, new_name)
1529
      new_name = self.op.new_name = hostname.name
1530
      if (self.op.ip_check and
1531
          netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
1532
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
1533
                                   (hostname.ip, new_name),
1534
                                   errors.ECODE_NOTUNIQUE)
1535

    
1536
    instance_names = [inst.name for
1537
                      inst in self.cfg.GetAllInstancesInfo().values()]
1538
    if new_name in instance_names and new_name != instance.name:
1539
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
1540
                                 new_name, errors.ECODE_EXISTS)
1541

    
1542
  def Exec(self, feedback_fn):
1543
    """Rename the instance.
1544

1545
    """
1546
    old_name = self.instance.name
1547

    
1548
    rename_file_storage = False
1549
    if (self.instance.disk_template in constants.DTS_FILEBASED and
1550
        self.op.new_name != self.instance.name):
1551
      old_file_storage_dir = os.path.dirname(
1552
                               self.instance.disks[0].logical_id[1])
1553
      rename_file_storage = True
1554

    
1555
    self.cfg.RenameInstance(self.instance.uuid, self.op.new_name)
1556
    # Change the instance lock. This is definitely safe while we hold the BGL.
1557
    # Otherwise the new lock would have to be added in acquired mode.
1558
    assert self.REQ_BGL
1559
    assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER)
1560
    self.glm.remove(locking.LEVEL_INSTANCE, old_name)
1561
    self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
1562

    
1563
    # re-read the instance from the configuration after rename
1564
    renamed_inst = self.cfg.GetInstanceInfo(self.instance.uuid)
1565

    
1566
    if rename_file_storage:
1567
      new_file_storage_dir = os.path.dirname(
1568
                               renamed_inst.disks[0].logical_id[1])
1569
      result = self.rpc.call_file_storage_dir_rename(renamed_inst.primary_node,
1570
                                                     old_file_storage_dir,
1571
                                                     new_file_storage_dir)
1572
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
1573
                   " (but the instance has been renamed in Ganeti)" %
1574
                   (self.cfg.GetNodeName(renamed_inst.primary_node),
1575
                    old_file_storage_dir, new_file_storage_dir))
1576

    
1577
    StartInstanceDisks(self, renamed_inst, None)
1578
    # update info on disks
1579
    info = GetInstanceInfoText(renamed_inst)
1580
    for (idx, disk) in enumerate(renamed_inst.disks):
1581
      for node_uuid in renamed_inst.all_nodes:
1582
        self.cfg.SetDiskID(disk, node_uuid)
1583
        result = self.rpc.call_blockdev_setinfo(node_uuid, disk, info)
1584
        result.Warn("Error setting info on node %s for disk %s" %
1585
                    (self.cfg.GetNodeName(node_uuid), idx), self.LogWarning)
1586
    try:
1587
      result = self.rpc.call_instance_run_rename(renamed_inst.primary_node,
1588
                                                 renamed_inst, old_name,
1589
                                                 self.op.debug_level)
1590
      result.Warn("Could not run OS rename script for instance %s on node %s"
1591
                  " (but the instance has been renamed in Ganeti)" %
1592
                  (renamed_inst.name,
1593
                   self.cfg.GetNodeName(renamed_inst.primary_node)),
1594
                  self.LogWarning)
1595
    finally:
1596
      ShutdownInstanceDisks(self, renamed_inst)
1597

    
1598
    return renamed_inst.name
1599

    
1600

    
1601
class LUInstanceRemove(LogicalUnit):
1602
  """Remove an instance.
1603

1604
  """
1605
  HPATH = "instance-remove"
1606
  HTYPE = constants.HTYPE_INSTANCE
1607
  REQ_BGL = False
1608

    
1609
  def ExpandNames(self):
1610
    self._ExpandAndLockInstance()
1611
    self.needed_locks[locking.LEVEL_NODE] = []
1612
    self.needed_locks[locking.LEVEL_NODE_RES] = []
1613
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
1614

    
1615
  def DeclareLocks(self, level):
1616
    if level == locking.LEVEL_NODE:
1617
      self._LockInstancesNodes()
1618
    elif level == locking.LEVEL_NODE_RES:
1619
      # Copy node locks
1620
      self.needed_locks[locking.LEVEL_NODE_RES] = \
1621
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1622

    
1623
  def BuildHooksEnv(self):
1624
    """Build hooks env.
1625

1626
    This runs on master, primary and secondary nodes of the instance.
1627

1628
    """
1629
    env = BuildInstanceHookEnvByObject(self, self.instance)
1630
    env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
1631
    return env
1632

    
1633
  def BuildHooksNodes(self):
1634
    """Build hooks nodes.
1635

1636
    """
1637
    nl = [self.cfg.GetMasterNode()]
1638
    nl_post = list(self.instance.all_nodes) + nl
1639
    return (nl, nl_post)
1640

    
1641
  def CheckPrereq(self):
1642
    """Check prerequisites.
1643

1644
    This checks that the instance is in the cluster.
1645

1646
    """
1647
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1648
    assert self.instance is not None, \
1649
      "Cannot retrieve locked instance %s" % self.op.instance_name
1650

    
1651
  def Exec(self, feedback_fn):
1652
    """Remove the instance.
1653

1654
    """
1655
    logging.info("Shutting down instance %s on node %s", self.instance.name,
1656
                 self.cfg.GetNodeName(self.instance.primary_node))
1657

    
1658
    result = self.rpc.call_instance_shutdown(self.instance.primary_node,
1659
                                             self.instance,
1660
                                             self.op.shutdown_timeout,
1661
                                             self.op.reason)
1662
    if self.op.ignore_failures:
1663
      result.Warn("Warning: can't shutdown instance", feedback_fn)
1664
    else:
1665
      result.Raise("Could not shutdown instance %s on node %s" %
1666
                   (self.instance.name,
1667
                    self.cfg.GetNodeName(self.instance.primary_node)))
1668

    
1669
    assert (self.owned_locks(locking.LEVEL_NODE) ==
1670
            self.owned_locks(locking.LEVEL_NODE_RES))
1671
    assert not (set(self.instance.all_nodes) -
1672
                self.owned_locks(locking.LEVEL_NODE)), \
1673
      "Not owning correct locks"
1674

    
1675
    RemoveInstance(self, feedback_fn, self.instance, self.op.ignore_failures)
1676

    
1677

    
1678
class LUInstanceMove(LogicalUnit):
1679
  """Move an instance by data-copying.
1680

1681
  """
1682
  HPATH = "instance-move"
1683
  HTYPE = constants.HTYPE_INSTANCE
1684
  REQ_BGL = False
1685

    
1686
  def ExpandNames(self):
1687
    self._ExpandAndLockInstance()
1688
    (self.op.target_node_uuid, self.op.target_node) = \
1689
      ExpandNodeUuidAndName(self.cfg, self.op.target_node_uuid,
1690
                            self.op.target_node)
1691
    self.needed_locks[locking.LEVEL_NODE] = [self.op.target_node_uuid]
1692
    self.needed_locks[locking.LEVEL_NODE_RES] = []
1693
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
1694

    
1695
  def DeclareLocks(self, level):
1696
    if level == locking.LEVEL_NODE:
1697
      self._LockInstancesNodes(primary_only=True)
1698
    elif level == locking.LEVEL_NODE_RES:
1699
      # Copy node locks
1700
      self.needed_locks[locking.LEVEL_NODE_RES] = \
1701
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1702

    
1703
  def BuildHooksEnv(self):
1704
    """Build hooks env.
1705

1706
    This runs on master, primary and secondary nodes of the instance.
1707

1708
    """
1709
    env = {
1710
      "TARGET_NODE": self.op.target_node,
1711
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
1712
      }
1713
    env.update(BuildInstanceHookEnvByObject(self, self.instance))
1714
    return env
1715

    
1716
  def BuildHooksNodes(self):
1717
    """Build hooks nodes.
1718

1719
    """
1720
    nl = [
1721
      self.cfg.GetMasterNode(),
1722
      self.instance.primary_node,
1723
      self.op.target_node_uuid,
1724
      ]
1725
    return (nl, nl)
1726

    
1727
  def CheckPrereq(self):
1728
    """Check prerequisites.
1729

1730
    This checks that the instance is in the cluster.
1731

1732
    """
1733
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1734
    assert self.instance is not None, \
1735
      "Cannot retrieve locked instance %s" % self.op.instance_name
1736

    
1737
    if self.instance.disk_template not in constants.DTS_COPYABLE:
1738
      raise errors.OpPrereqError("Disk template %s not suitable for copying" %
1739
                                 self.instance.disk_template,
1740
                                 errors.ECODE_STATE)
1741

    
1742
    target_node = self.cfg.GetNodeInfo(self.op.target_node_uuid)
1743
    assert target_node is not None, \
1744
      "Cannot retrieve locked node %s" % self.op.target_node
1745

    
1746
    self.target_node_uuid = target_node.uuid
1747
    if target_node.uuid == self.instance.primary_node:
1748
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
1749
                                 (self.instance.name, target_node.name),
1750
                                 errors.ECODE_STATE)
1751

    
1752
    bep = self.cfg.GetClusterInfo().FillBE(self.instance)
1753

    
1754
    for idx, dsk in enumerate(self.instance.disks):
1755
      if dsk.dev_type not in (constants.DT_PLAIN, constants.DT_FILE,
1756
                              constants.DT_SHARED_FILE):
1757
        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
1758
                                   " cannot copy" % idx, errors.ECODE_STATE)
1759

    
1760
    CheckNodeOnline(self, target_node.uuid)
1761
    CheckNodeNotDrained(self, target_node.uuid)
1762
    CheckNodeVmCapable(self, target_node.uuid)
1763
    cluster = self.cfg.GetClusterInfo()
1764
    group_info = self.cfg.GetNodeGroup(target_node.group)
1765
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
1766
    CheckTargetNodeIPolicy(self, ipolicy, self.instance, target_node, self.cfg,
1767
                           ignore=self.op.ignore_ipolicy)
1768

    
1769
    if self.instance.admin_state == constants.ADMINST_UP:
1770
      # check memory requirements on the secondary node
1771
      CheckNodeFreeMemory(
1772
          self, target_node.uuid, "failing over instance %s" %
1773
          self.instance.name, bep[constants.BE_MAXMEM],
1774
          self.instance.hypervisor,
1775
          self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])
1776
    else:
1777
      self.LogInfo("Not checking memory on the secondary node as"
1778
                   " instance will not be started")
1779

    
1780
    # check bridge existence
1781
    CheckInstanceBridgesExist(self, self.instance, node_uuid=target_node.uuid)
1782

    
1783
  def Exec(self, feedback_fn):
1784
    """Move an instance.
1785

1786
    The move is done by shutting it down on its present node, copying
1787
    the data over (slow) and starting it on the new node.
1788

1789
    """
1790
    source_node = self.cfg.GetNodeInfo(self.instance.primary_node)
1791
    target_node = self.cfg.GetNodeInfo(self.target_node_uuid)
1792

    
1793
    self.LogInfo("Shutting down instance %s on source node %s",
1794
                 self.instance.name, source_node.name)
1795

    
1796
    assert (self.owned_locks(locking.LEVEL_NODE) ==
1797
            self.owned_locks(locking.LEVEL_NODE_RES))
1798

    
1799
    result = self.rpc.call_instance_shutdown(source_node.uuid, self.instance,
1800
                                             self.op.shutdown_timeout,
1801
                                             self.op.reason)
1802
    if self.op.ignore_consistency:
1803
      result.Warn("Could not shutdown instance %s on node %s. Proceeding"
1804
                  " anyway. Please make sure node %s is down. Error details" %
1805
                  (self.instance.name, source_node.name, source_node.name),
1806
                  self.LogWarning)
1807
    else:
1808
      result.Raise("Could not shutdown instance %s on node %s" %
1809
                   (self.instance.name, source_node.name))
1810

    
1811
    # create the target disks
1812
    try:
1813
      CreateDisks(self, self.instance, target_node_uuid=target_node.uuid)
1814
    except errors.OpExecError:
1815
      self.LogWarning("Device creation failed")
1816
      self.cfg.ReleaseDRBDMinors(self.instance.uuid)
1817
      raise
1818

    
1819
    cluster_name = self.cfg.GetClusterInfo().cluster_name
1820

    
1821
    errs = []
1822
    # activate, get path, copy the data over
1823
    for idx, disk in enumerate(self.instance.disks):
1824
      self.LogInfo("Copying data for disk %d", idx)
1825
      result = self.rpc.call_blockdev_assemble(
1826
                 target_node.uuid, (disk, self.instance), self.instance.name,
1827
                 True, idx)
1828
      if result.fail_msg:
1829
        self.LogWarning("Can't assemble newly created disk %d: %s",
1830
                        idx, result.fail_msg)
1831
        errs.append(result.fail_msg)
1832
        break
1833
      dev_path = result.payload
1834
      result = self.rpc.call_blockdev_export(source_node.uuid, (disk,
1835
                                                                self.instance),
1836
                                             target_node.secondary_ip,
1837
                                             dev_path, cluster_name)
1838
      if result.fail_msg:
1839
        self.LogWarning("Can't copy data over for disk %d: %s",
1840
                        idx, result.fail_msg)
1841
        errs.append(result.fail_msg)
1842
        break
1843

    
1844
    if errs:
1845
      self.LogWarning("Some disks failed to copy, aborting")
1846
      try:
1847
        RemoveDisks(self, self.instance, target_node_uuid=target_node.uuid)
1848
      finally:
1849
        self.cfg.ReleaseDRBDMinors(self.instance.uuid)
1850
        raise errors.OpExecError("Errors during disk copy: %s" %
1851
                                 (",".join(errs),))
1852

    
1853
    self.instance.primary_node = target_node.uuid
1854
    self.cfg.Update(self.instance, feedback_fn)
1855

    
1856
    self.LogInfo("Removing the disks on the original node")
1857
    RemoveDisks(self, self.instance, target_node_uuid=source_node.uuid)
1858

    
1859
    # Only start the instance if it's marked as up
1860
    if self.instance.admin_state == constants.ADMINST_UP:
1861
      self.LogInfo("Starting instance %s on node %s",
1862
                   self.instance.name, target_node.name)
1863

    
1864
      disks_ok, _ = AssembleInstanceDisks(self, self.instance,
1865
                                          ignore_secondaries=True)
1866
      if not disks_ok:
1867
        ShutdownInstanceDisks(self, self.instance)
1868
        raise errors.OpExecError("Can't activate the instance's disks")
1869

    
1870
      result = self.rpc.call_instance_start(target_node.uuid,
1871
                                            (self.instance, None, None), False,
1872
                                            self.op.reason)
1873
      msg = result.fail_msg
1874
      if msg:
1875
        ShutdownInstanceDisks(self, self.instance)
1876
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
1877
                                 (self.instance.name, target_node.name, msg))
1878

    
1879

    
1880
class LUInstanceMultiAlloc(NoHooksLU):
1881
  """Allocates multiple instances at the same time.
1882

1883
  """
1884
  REQ_BGL = False
1885

    
1886
  def CheckArguments(self):
1887
    """Check arguments.
1888

1889
    """
1890
    nodes = []
1891
    for inst in self.op.instances:
1892
      if inst.iallocator is not None:
1893
        raise errors.OpPrereqError("iallocator are not allowed to be set on"
1894
                                   " instance objects", errors.ECODE_INVAL)
1895
      nodes.append(bool(inst.pnode))
1896
      if inst.disk_template in constants.DTS_INT_MIRROR:
1897
        nodes.append(bool(inst.snode))
1898

    
1899
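    # 'nodes' holds one boolean per expected node specification: pnode for
    # every instance, plus snode for mirrored disk templates. Either all of
    # them must be given or none of them; all(nodes) ^ any(nodes) is true
    # exactly when the two kinds are mixed.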
    has_nodes = compat.any(nodes)
1900
    if compat.all(nodes) ^ has_nodes:
1901
      raise errors.OpPrereqError("There are instance objects providing"
1902
                                 " pnode/snode while others do not",
1903
                                 errors.ECODE_INVAL)
1904

    
1905
    if not has_nodes and self.op.iallocator is None:
1906
      default_iallocator = self.cfg.GetDefaultIAllocator()
1907
      if default_iallocator:
1908
        self.op.iallocator = default_iallocator
1909
      else:
1910
        raise errors.OpPrereqError("No iallocator or nodes on the instances"
1911
                                   " given and no cluster-wide default"
1912
                                   " iallocator found; please specify either"
1913
                                   " an iallocator or nodes on the instances"
1914
                                   " or set a cluster-wide default iallocator",
1915
                                   errors.ECODE_INVAL)
1916

    
1917
    _CheckOpportunisticLocking(self.op)
1918

    
1919
    dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
1920
    if dups:
1921
      raise errors.OpPrereqError("There are duplicate instance names: %s" %
1922
                                 utils.CommaJoin(dups), errors.ECODE_INVAL)
1923

    
1924
  def ExpandNames(self):
1925
    """Calculate the locks.
1926

1927
    """
1928
    self.share_locks = ShareAll()
1929
    self.needed_locks = {
1930
      # iallocator will select nodes and even if no iallocator is used,
1931
      # collisions with LUInstanceCreate should be avoided
1932
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
1933
      }
1934

    
1935
    if self.op.iallocator:
1936
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
1937
      self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET
1938

    
1939
      if self.op.opportunistic_locking:
1940
        self.opportunistic_locks[locking.LEVEL_NODE] = True
1941
    else:
1942
      nodeslist = []
1943
      for inst in self.op.instances:
1944
        (inst.pnode_uuid, inst.pnode) = \
1945
          ExpandNodeUuidAndName(self.cfg, inst.pnode_uuid, inst.pnode)
1946
        nodeslist.append(inst.pnode_uuid)
1947
        if inst.snode is not None:
1948
          (inst.snode_uuid, inst.snode) = \
1949
            ExpandNodeUuidAndName(self.cfg, inst.snode_uuid, inst.snode)
1950
          nodeslist.append(inst.snode_uuid)
1951

    
1952
      self.needed_locks[locking.LEVEL_NODE] = nodeslist
1953
      # Lock resources of instance's primary and secondary nodes (copy to
1954
      # prevent accidental modification)
1955
      self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)
1956

    
1957
  def DeclareLocks(self, level):
1958
    if level == locking.LEVEL_NODE_RES and \
1959
      self.opportunistic_locks[locking.LEVEL_NODE]:
1960
      # Even when using opportunistic locking, we require the same set of
1961
      # NODE_RES locks as we got NODE locks
1962
      self.needed_locks[locking.LEVEL_NODE_RES] = \
1963
        self.owned_locks(locking.LEVEL_NODE)
1964

    
1965
  def CheckPrereq(self):
1966
    """Check prerequisite.
1967

1968
    """
1969
    if self.op.iallocator:
1970
      cluster = self.cfg.GetClusterInfo()
1971
      default_vg = self.cfg.GetVGName()
1972
      ec_id = self.proc.GetECId()
1973

    
1974
      if self.op.opportunistic_locking:
1975
        # Only consider nodes for which a lock is held
1976
        node_whitelist = self.cfg.GetNodeNames(
1977
                           list(self.owned_locks(locking.LEVEL_NODE)))
1978
      else:
1979
        node_whitelist = None
1980

    
1981
      insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
1982
                                           _ComputeNics(op, cluster, None,
1983
                                                        self.cfg, ec_id),
1984
                                           _ComputeFullBeParams(op, cluster),
1985
                                           node_whitelist)
1986
               for op in self.op.instances]
1987

    
1988
      req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
1989
      ial = iallocator.IAllocator(self.cfg, self.rpc, req)
1990

    
1991
      ial.Run(self.op.iallocator)
1992

    
1993
      if not ial.success:
1994
        raise errors.OpPrereqError("Can't compute nodes using"
1995
                                   " iallocator '%s': %s" %
1996
                                   (self.op.iallocator, ial.info),
1997
                                   errors.ECODE_NORES)
1998

    
1999
      self.ia_result = ial.result
2000

    
2001
    if self.op.dry_run:
2002
      self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
2003
        constants.JOB_IDS_KEY: [],
2004
        })
2005

    
2006
  def _ConstructPartialResult(self):
2007
    """Contructs the partial result.
2008

2009
    """
2010
    if self.op.iallocator:
2011
      (allocatable, failed_insts) = self.ia_result
2012
      allocatable_insts = map(compat.fst, allocatable)
2013
    else:
2014
      allocatable_insts = [op.instance_name for op in self.op.instances]
2015
      failed_insts = []
2016

    
2017
    return {
2018
      opcodes.OpInstanceMultiAlloc.ALLOCATABLE_KEY: allocatable_insts,
2019
      opcodes.OpInstanceMultiAlloc.FAILED_KEY: failed_insts,
2020
      }
2021

    
2022
  def Exec(self, feedback_fn):
2023
    """Executes the opcode.
2024

2025
    """
2026
    jobs = []
2027
    if self.op.iallocator:
2028
      op2inst = dict((op.instance_name, op) for op in self.op.instances)
2029
      (allocatable, failed) = self.ia_result
2030

    
2031
      for (name, node_names) in allocatable:
2032
        op = op2inst.pop(name)
2033

    
2034
        (op.pnode_uuid, op.pnode) = \
2035
          ExpandNodeUuidAndName(self.cfg, None, node_names[0])
2036
        if len(node_names) > 1:
2037
          (op.snode_uuid, op.snode) = \
2038
            ExpandNodeUuidAndName(self.cfg, None, node_names[1])
2039

    
2040
        jobs.append([op])
2041

    
2042
      missing = set(op2inst.keys()) - set(failed)
2043
      assert not missing, \
2044
          "Iallocator did return incomplete result: %s" % \
2045
        utils.CommaJoin(missing)
2046
    else:
2047
      jobs.extend([op] for op in self.op.instances)
2048

    
2049
    return ResultWithJobs(jobs, **self._ConstructPartialResult())
2050

    
2051

    
2052
class _InstNicModPrivate:
2053
  """Data structure for network interface modifications.
2054

2055
  Used by L{LUInstanceSetParams}.
2056

2057
  """
2058
  def __init__(self):
2059
    self.params = None
2060
    self.filled = None
2061

    
2062

    
2063
def _PrepareContainerMods(mods, private_fn):
2064
  """Prepares a list of container modifications by adding a private data field.
2065

2066
  @type mods: list of tuples; (operation, index, parameters)
2067
  @param mods: List of modifications
2068
  @type private_fn: callable or None
2069
  @param private_fn: Callable for constructing a private data field for a
2070
    modification
2071
  @rtype: list
2072

2073
  """
2074
  if private_fn is None:
2075
    fn = lambda: None
2076
  else:
2077
    fn = private_fn
2078

    
2079
  return [(op, idx, params, fn()) for (op, idx, params) in mods]
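# Illustrative example (not part of the original code): each modification is
# paired with a freshly constructed private object, e.g.
#
#   mods = [(constants.DDM_ADD, -1, {"size": 1024}),
#           (constants.DDM_REMOVE, 0, {})]
#   _PrepareContainerMods(mods, _InstNicModPrivate)
#   # -> [(DDM_ADD, -1, {"size": 1024}, <_InstNicModPrivate>),
#   #     (DDM_REMOVE, 0, {}, <_InstNicModPrivate>)]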
2080

    
2081

    
2082
def _CheckNodesPhysicalCPUs(lu, node_uuids, requested, hypervisor_specs):
2083
  """Checks if nodes have enough physical CPUs
2084

2085
  This function checks if all given nodes have the needed number of
2086
  physical CPUs. In case any node has less CPUs or we cannot get the
2087
  information from the node, this function raises an OpPrereqError
2088
  exception.
2089

2090
  @type lu: C{LogicalUnit}
2091
  @param lu: a logical unit from which we get configuration data
2092
  @type node_uuids: C{list}
2093
  @param node_uuids: the list of node UUIDs to check
2094
  @type requested: C{int}
2095
  @param requested: the minimum acceptable number of physical CPUs
2096
  @type hypervisor_specs: list of pairs (string, dict of strings)
2097
  @param hypervisor_specs: list of hypervisor specifications in
2098
      pairs (hypervisor_name, hvparams)
2099
  @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
2100
      or we cannot check the node
2101

2102
  """
2103
  nodeinfo = lu.rpc.call_node_info(node_uuids, None, hypervisor_specs)
2104
  for node_uuid in node_uuids:
2105
    info = nodeinfo[node_uuid]
2106
    node_name = lu.cfg.GetNodeName(node_uuid)
2107
    info.Raise("Cannot get current information from node %s" % node_name,
2108
               prereq=True, ecode=errors.ECODE_ENVIRON)
2109
    (_, _, (hv_info, )) = info.payload
2110
    num_cpus = hv_info.get("cpu_total", None)
2111
    if not isinstance(num_cpus, int):
2112
      raise errors.OpPrereqError("Can't compute the number of physical CPUs"
2113
                                 " on node %s, result was '%s'" %
2114
                                 (node_name, num_cpus), errors.ECODE_ENVIRON)
2115
    if requested > num_cpus:
2116
      raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
2117
                                 "required" % (node_name, num_cpus, requested),
2118
                                 errors.ECODE_NORES)
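# A hypothetical call (parameter values assumed): verify that two candidate
# nodes expose at least 4 physical CPUs each, using the cluster's Xen PVM
# hypervisor parameters:
#
#   hv_specs = [(constants.HT_XEN_PVM,
#                cluster.hvparams[constants.HT_XEN_PVM])]
#   _CheckNodesPhysicalCPUs(self, [pnode.uuid, snode.uuid], 4, hv_specs)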
2119

    
2120

    
2121
def GetItemFromContainer(identifier, kind, container):
2122
  """Return the item refered by the identifier.
2123

2124
  @type identifier: string
2125
  @param identifier: Item index or name or UUID
2126
  @type kind: string
2127
  @param kind: One-word item description
2128
  @type container: list
2129
  @param container: Container to get the item from
2130

2131
  """
2132
  # Index
2133
  try:
2134
    idx = int(identifier)
2135
    if idx == -1:
2136
      # Append
2137
      absidx = len(container) - 1
2138
    elif idx < 0:
2139
      raise IndexError("Not accepting negative indices other than -1")
2140
    elif idx > len(container):
2141
      raise IndexError("Got %s index %s, but there are only %s" %
2142
                       (kind, idx, len(container)))
2143
    else:
2144
      absidx = idx
2145
    return (absidx, container[idx])
2146
  except ValueError:
2147
    pass
2148

    
2149
  for idx, item in enumerate(container):
2150
    if item.uuid == identifier or item.name == identifier:
2151
      return (idx, item)
2152

    
2153
  raise errors.OpPrereqError("Cannot find %s with identifier %s" %
2154
                             (kind, identifier), errors.ECODE_NOENT)
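# Illustrative lookups (assuming an instance with two NICs named "eth0" and
# "eth1"):
#
#   GetItemFromContainer("1", "nic", instance.nics)     # -> (1, <eth1 NIC>)
#   GetItemFromContainer("eth0", "nic", instance.nics)  # -> (0, <eth0 NIC>)
#   GetItemFromContainer("-1", "nic", instance.nics)    # -> (1, <eth1 NIC>)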
2155

    
2156

    
2157
def _ApplyContainerMods(kind, container, chgdesc, mods,
2158
                        create_fn, modify_fn, remove_fn):
2159
  """Applies descriptions in C{mods} to C{container}.
2160

2161
  @type kind: string
2162
  @param kind: One-word item description
2163
  @type container: list
2164
  @param container: Container to modify
2165
  @type chgdesc: None or list
2166
  @param chgdesc: List of applied changes
2167
  @type mods: list
2168
  @param mods: Modifications as returned by L{_PrepareContainerMods}
2169
  @type create_fn: callable
2170
  @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
2171
    receives absolute item index, parameters and private data object as added
2172
    by L{_PrepareContainerMods}, returns tuple containing new item and changes
2173
    as list
2174
  @type modify_fn: callable
2175
  @param modify_fn: Callback for modifying an existing item
2176
    (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
2177
    and private data object as added by L{_PrepareContainerMods}, returns
2178
    changes as list
2179
  @type remove_fn: callable
2180
  @param remove_fn: Callback on removing item; receives absolute item index,
2181
    item and private data object as added by L{_PrepareContainerMods}
2182

2183
  """
2184
  for (op, identifier, params, private) in mods:
2185
    changes = None
2186

    
2187
    if op == constants.DDM_ADD:
2188
      # Calculate where item will be added
2189
      # When adding an item, identifier can only be an index
2190
      try:
2191
        idx = int(identifier)
2192
      except ValueError:
2193
        raise errors.OpPrereqError("Only possitive integer or -1 is accepted as"
2194
                                   " identifier for %s" % constants.DDM_ADD,
2195
                                   errors.ECODE_INVAL)
2196
      if idx == -1:
2197
        addidx = len(container)
2198
      else:
2199
        if idx < 0:
2200
          raise IndexError("Not accepting negative indices other than -1")
2201
        elif idx > len(container):
2202
          raise IndexError("Got %s index %s, but there are only %s" %
2203
                           (kind, idx, len(container)))
2204
        addidx = idx
2205

    
2206
      if create_fn is None:
2207
        item = params
2208
      else:
2209
        (item, changes) = create_fn(addidx, params, private)
2210

    
2211
      if idx == -1:
2212
        container.append(item)
2213
      else:
2214
        assert idx >= 0
2215
        assert idx <= len(container)
2216
        # list.insert does so before the specified index
2217
        container.insert(idx, item)
2218
    else:
2219
      # Retrieve existing item
2220
      (absidx, item) = GetItemFromContainer(identifier, kind, container)
2221

    
2222
      if op == constants.DDM_REMOVE:
2223
        assert not params
2224

    
2225
        if remove_fn is not None:
2226
          remove_fn(absidx, item, private)
2227

    
2228
        changes = [("%s/%s" % (kind, absidx), "remove")]
2229

    
2230
        assert container[absidx] == item
2231
        del container[absidx]
2232
      elif op == constants.DDM_MODIFY:
2233
        if modify_fn is not None:
2234
          changes = modify_fn(absidx, item, params, private)
2235
      else:
2236
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2237

    
2238
    assert _TApplyContModsCbChanges(changes)
2239

    
2240
    if not (chgdesc is None or changes is None):
2241
      chgdesc.extend(changes)
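# A minimal sketch of how this is used (data and callbacks assumed): removing
# the second NIC while recording the change description:
#
#   nics = [nic0, nic1]
#   chgdesc = []
#   mods = _PrepareContainerMods([(constants.DDM_REMOVE, "1", {})], None)
#   _ApplyContainerMods("nic", nics, chgdesc, mods, None, None, None)
#   # -> nics == [nic0], chgdesc == [("nic/1", "remove")]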
2242

    
2243

    
2244
def _UpdateIvNames(base_index, disks):
2245
  """Updates the C{iv_name} attribute of disks.
2246

2247
  @type disks: list of L{objects.Disk}
2248

2249
  """
2250
  for (idx, disk) in enumerate(disks):
2251
    disk.iv_name = "disk/%s" % (base_index + idx, )
2252

    
2253

    
2254
class LUInstanceSetParams(LogicalUnit):
2255
  """Modifies an instances's parameters.
2256

2257
  """
2258
  HPATH = "instance-modify"
2259
  HTYPE = constants.HTYPE_INSTANCE
2260
  REQ_BGL = False
2261

    
2262
  @staticmethod
2263
  def _UpgradeDiskNicMods(kind, mods, verify_fn):
2264
    assert ht.TList(mods)
2265
    assert not mods or len(mods[0]) in (2, 3)
2266

    
2267
    if mods and len(mods[0]) == 2:
2268
      result = []
2269

    
2270
      addremove = 0
2271
      for op, params in mods:
2272
        if op in (constants.DDM_ADD, constants.DDM_REMOVE):
2273
          result.append((op, -1, params))
2274
          addremove += 1
2275

    
2276
          if addremove > 1:
2277
            raise errors.OpPrereqError("Only one %s add or remove operation is"
2278
                                       " supported at a time" % kind,
2279
                                       errors.ECODE_INVAL)
2280
        else:
2281
          result.append((constants.DDM_MODIFY, op, params))
2282

    
2283
      assert verify_fn(result)
2284
    else:
2285
      result = mods
2286

    
2287
    return result
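  # As an illustration (legacy input assumed): the two-element format
  #   [("add", {"size": 1024}), (2, {"mode": "ro"})]
  # is upgraded to the three-element format
  #   [(constants.DDM_ADD, -1, {"size": 1024}),
  #    (constants.DDM_MODIFY, 2, {"mode": "ro"})]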
2288

    
2289
  @staticmethod
2290
  def _CheckMods(kind, mods, key_types, item_fn):
2291
    """Ensures requested disk/NIC modifications are valid.
2292

2293
    """
2294
    for (op, _, params) in mods:
2295
      assert ht.TDict(params)
2296

    
2297
      # If 'key_types' is an empty dict, we assume we have an
2298
      # 'ext' template and thus do not ForceDictType
2299
      if key_types:
2300
        utils.ForceDictType(params, key_types)
2301

    
2302
      if op == constants.DDM_REMOVE:
2303
        if params:
2304
          raise errors.OpPrereqError("No settings should be passed when"
2305
                                     " removing a %s" % kind,
2306
                                     errors.ECODE_INVAL)
2307
      elif op in (constants.DDM_ADD, constants.DDM_MODIFY):
2308
        item_fn(op, params)
2309
      else:
2310
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2311

    
2312
  def _VerifyDiskModification(self, op, params, excl_stor):
2313
    """Verifies a disk modification.
2314

2315
    """
2316
    if op == constants.DDM_ADD:
2317
      mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
2318
      if mode not in constants.DISK_ACCESS_SET:
2319
        raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
2320
                                   errors.ECODE_INVAL)
2321

    
2322
      size = params.get(constants.IDISK_SIZE, None)
2323
      if size is None:
2324
        raise errors.OpPrereqError("Required disk parameter '%s' missing" %
2325
                                   constants.IDISK_SIZE, errors.ECODE_INVAL)
2326

    
2327
      try:
2328
        size = int(size)
2329
      except (TypeError, ValueError), err:
2330
        raise errors.OpPrereqError("Invalid disk size parameter: %s" % err,
2331
                                   errors.ECODE_INVAL)
2332

    
2333
      params[constants.IDISK_SIZE] = size
2334
      name = params.get(constants.IDISK_NAME, None)
2335
      if name is not None and name.lower() == constants.VALUE_NONE:
2336
        params[constants.IDISK_NAME] = None
2337

    
2338
      CheckSpindlesExclusiveStorage(params, excl_stor, True)
2339

    
2340
    elif op == constants.DDM_MODIFY:
2341
      if constants.IDISK_SIZE in params:
2342
        raise errors.OpPrereqError("Disk size change not possible, use"
2343
                                   " grow-disk", errors.ECODE_INVAL)
2344

    
2345
      # Disk modification supports changing only the disk name and mode.
2346
      # Changing arbitrary parameters is allowed only for ext disk template.
2347
      if self.instance.disk_template != constants.DT_EXT:
2348
        utils.ForceDictType(params, constants.MODIFIABLE_IDISK_PARAMS_TYPES)
2349

    
2350
      name = params.get(constants.IDISK_NAME, None)
2351
      if name is not None and name.lower() == constants.VALUE_NONE:
2352
        params[constants.IDISK_NAME] = None
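  # Illustrative parameter sets accepted above (values assumed): an "add"
  # needs at least a size, e.g. {constants.IDISK_SIZE: 1024}, with the mode
  # defaulting to constants.DISK_RDWR; a "modify" on a non-ext template may
  # only change name/mode, e.g. {constants.IDISK_MODE: constants.DISK_RDONLY}.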
2353

    
2354
  @staticmethod
2355
  def _VerifyNicModification(op, params):
2356
    """Verifies a network interface modification.
2357

2358
    """
2359
    if op in (constants.DDM_ADD, constants.DDM_MODIFY):
2360
      ip = params.get(constants.INIC_IP, None)
2361
      name = params.get(constants.INIC_NAME, None)
2362
      req_net = params.get(constants.INIC_NETWORK, None)
2363
      link = params.get(constants.NIC_LINK, None)
2364
      mode = params.get(constants.NIC_MODE, None)
2365
      if name is not None and name.lower() == constants.VALUE_NONE:
2366
        params[constants.INIC_NAME] = None
2367
      if req_net is not None:
2368
        if req_net.lower() == constants.VALUE_NONE:
2369
          params[constants.INIC_NETWORK] = None
2370
          req_net = None
2371
        elif link is not None or mode is not None:
2372
          raise errors.OpPrereqError("If network is given"
2373
                                     " mode or link should not",
2374
                                     errors.ECODE_INVAL)
2375

    
2376
      if op == constants.DDM_ADD:
2377
        macaddr = params.get(constants.INIC_MAC, None)
2378
        if macaddr is None:
2379
          params[constants.INIC_MAC] = constants.VALUE_AUTO
2380

    
2381
      if ip is not None:
2382
        if ip.lower() == constants.VALUE_NONE:
2383
          params[constants.INIC_IP] = None
2384
        else:
2385
          if ip.lower() == constants.NIC_IP_POOL:
2386
            if op == constants.DDM_ADD and req_net is None:
2387
              raise errors.OpPrereqError("If ip=pool, parameter network"
2388
                                         " cannot be none",
2389
                                         errors.ECODE_INVAL)
2390
          else:
2391
            if not netutils.IPAddress.IsValid(ip):
2392
              raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
2393
                                         errors.ECODE_INVAL)
2394

    
2395
      if constants.INIC_MAC in params:
2396
        macaddr = params[constants.INIC_MAC]
2397
        if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2398
          macaddr = utils.NormalizeAndValidateMac(macaddr)
2399

    
2400
        if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
2401
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
2402
                                     " modifying an existing NIC",
2403
                                     errors.ECODE_INVAL)
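  # Illustrative NIC parameter sets (values assumed): adding a NIC whose IP
  # is taken from a network's address pool could pass
  #   {constants.INIC_IP: constants.NIC_IP_POOL, constants.INIC_NETWORK: "net1"}
  # while modifying only the MAC of an existing NIC could pass
  #   {constants.INIC_MAC: "aa:00:00:35:e4:01"}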
2404

    
2405
  def CheckArguments(self):
2406
    if not (self.op.nics or self.op.disks or self.op.disk_template or
2407
            self.op.hvparams or self.op.beparams or self.op.os_name or
2408
            self.op.osparams or self.op.offline is not None or
2409
            self.op.runtime_mem or self.op.pnode):
2410
      raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
2411

    
2412
    if self.op.hvparams:
2413
      CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
2414
                           "hypervisor", "instance", "cluster")
2415

    
2416
    self.op.disks = self._UpgradeDiskNicMods(
2417
      "disk", self.op.disks, opcodes.OpInstanceSetParams.TestDiskModifications)
2418
    self.op.nics = self._UpgradeDiskNicMods(
2419
      "NIC", self.op.nics, opcodes.OpInstanceSetParams.TestNicModifications)
2420

    
2421
    if self.op.disks and self.op.disk_template is not None:
2422
      raise errors.OpPrereqError("Disk template conversion and other disk"
2423
                                 " changes not supported at the same time",
2424
                                 errors.ECODE_INVAL)
2425

    
2426
    if (self.op.disk_template and
2427
        self.op.disk_template in constants.DTS_INT_MIRROR and
2428
        self.op.remote_node is None):
2429
      raise errors.OpPrereqError("Changing the disk template to a mirrored"
2430
                                 " one requires specifying a secondary node",
2431
                                 errors.ECODE_INVAL)
2432

    
2433
    # Check NIC modifications
2434
    self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
2435
                    self._VerifyNicModification)
2436

    
2437
    if self.op.pnode:
2438
      (self.op.pnode_uuid, self.op.pnode) = \
2439
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
2440

    
2441
  def ExpandNames(self):
2442
    self._ExpandAndLockInstance()
2443
    self.needed_locks[locking.LEVEL_NODEGROUP] = []
2444
    # Can't even acquire node locks in shared mode as upcoming changes in
2445
    # Ganeti 2.6 will start to modify the node object on disk conversion
2446
    self.needed_locks[locking.LEVEL_NODE] = []
2447
    self.needed_locks[locking.LEVEL_NODE_RES] = []
2448
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2449
    # Lock the node group to look up the ipolicy
2450
    self.share_locks[locking.LEVEL_NODEGROUP] = 1
2451

    
2452
  def DeclareLocks(self, level):
2453
    if level == locking.LEVEL_NODEGROUP:
2454
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
2455
      # Acquire locks for the instance's nodegroups optimistically. Needs
2456
      # to be verified in CheckPrereq
2457
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
2458
        self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
2459
    elif level == locking.LEVEL_NODE:
2460
      self._LockInstancesNodes()
2461
      if self.op.disk_template and self.op.remote_node:
2462
        (self.op.remote_node_uuid, self.op.remote_node) = \
2463
          ExpandNodeUuidAndName(self.cfg, self.op.remote_node_uuid,
2464
                                self.op.remote_node)
2465
        self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node_uuid)
2466
    elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
2467
      # Copy node locks
2468
      self.needed_locks[locking.LEVEL_NODE_RES] = \
2469
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
2470

    
2471
  def BuildHooksEnv(self):
2472
    """Build hooks env.
2473

2474
    This runs on the master, primary and secondaries.
2475

2476
    """
2477
    args = {}
2478
    if constants.BE_MINMEM in self.be_new:
2479
      args["minmem"] = self.be_new[constants.BE_MINMEM]
2480
    if constants.BE_MAXMEM in self.be_new:
2481
      args["maxmem"] = self.be_new[constants.BE_MAXMEM]
2482
    if constants.BE_VCPUS in self.be_new:
2483
      args["vcpus"] = self.be_new[constants.BE_VCPUS]
2484
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
2485
    # information at all.
2486

    
2487
    if self._new_nics is not None:
2488
      nics = []
2489

    
2490
      for nic in self._new_nics:
2491
        n = copy.deepcopy(nic)
2492
        nicparams = self.cluster.SimpleFillNIC(n.nicparams)
2493
        n.nicparams = nicparams
2494
        nics.append(NICToTuple(self, n))
2495

    
2496
      args["nics"] = nics
2497

    
2498
    env = BuildInstanceHookEnvByObject(self, self.instance, override=args)
2499
    if self.op.disk_template:
2500
      env["NEW_DISK_TEMPLATE"] = self.op.disk_template
2501
    if self.op.runtime_mem:
2502
      env["RUNTIME_MEMORY"] = self.op.runtime_mem
2503

    
2504
    return env
2505

    
2506
  def BuildHooksNodes(self):
2507
    """Build hooks nodes.
2508

2509
    """
2510
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
2511
    return (nl, nl)
2512

    
2513
  def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
                              old_params, cluster, pnode_uuid):

    update_params_dict = dict([(key, params[key])
                               for key in constants.NICS_PARAMETERS
                               if key in params])

    req_link = update_params_dict.get(constants.NIC_LINK, None)
    req_mode = update_params_dict.get(constants.NIC_MODE, None)

    new_net_uuid = None
    new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid)
    if new_net_uuid_or_name:
      new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name)
      new_net_obj = self.cfg.GetNetwork(new_net_uuid)

    if old_net_uuid:
      old_net_obj = self.cfg.GetNetwork(old_net_uuid)

    if new_net_uuid:
      netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode_uuid)
      if not netparams:
        raise errors.OpPrereqError("No netparams found for the network"
                                   " %s, probably not connected" %
                                   new_net_obj.name, errors.ECODE_INVAL)
      new_params = dict(netparams)
    else:
      new_params = GetUpdatedParams(old_params, update_params_dict)

    utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)

    new_filled_params = cluster.SimpleFillNIC(new_params)
    objects.NIC.CheckParameterSyntax(new_filled_params)

    new_mode = new_filled_params[constants.NIC_MODE]
    if new_mode == constants.NIC_MODE_BRIDGED:
      bridge = new_filled_params[constants.NIC_LINK]
      msg = self.rpc.call_bridges_exist(pnode_uuid, [bridge]).fail_msg
      if msg:
        msg = "Error checking bridges on node '%s': %s" % \
                (self.cfg.GetNodeName(pnode_uuid), msg)
        if self.op.force:
          self.warn.append(msg)
        else:
          raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)

    elif new_mode == constants.NIC_MODE_ROUTED:
      ip = params.get(constants.INIC_IP, old_ip)
      if ip is None:
        raise errors.OpPrereqError("Cannot set the NIC IP address to None"
                                   " on a routed NIC", errors.ECODE_INVAL)

    elif new_mode == constants.NIC_MODE_OVS:
      # TODO: check OVS link
      self.LogInfo("OVS links are currently not checked for correctness")

    if constants.INIC_MAC in params:
      mac = params[constants.INIC_MAC]
      if mac is None:
        raise errors.OpPrereqError("Cannot unset the NIC MAC address",
                                   errors.ECODE_INVAL)
      elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        # otherwise generate the MAC address
        params[constants.INIC_MAC] = \
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
      else:
        # or validate/reserve the current one
        try:
          self.cfg.ReserveMAC(mac, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("MAC address '%s' already in use"
                                     " in cluster" % mac,
                                     errors.ECODE_NOTUNIQUE)
    elif new_net_uuid != old_net_uuid:

      def get_net_prefix(net_uuid):
        mac_prefix = None
        if net_uuid:
          nobj = self.cfg.GetNetwork(net_uuid)
          mac_prefix = nobj.mac_prefix

        return mac_prefix

      new_prefix = get_net_prefix(new_net_uuid)
      old_prefix = get_net_prefix(old_net_uuid)
      if old_prefix != new_prefix:
        params[constants.INIC_MAC] = \
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())

    # if there is a change in (ip, network) tuple
    new_ip = params.get(constants.INIC_IP, old_ip)
    if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
      if new_ip:
        # if IP is pool then require a network and generate one IP
        if new_ip.lower() == constants.NIC_IP_POOL:
          if new_net_uuid:
            try:
              new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("Unable to get a free IP"
                                         " from the address pool",
                                         errors.ECODE_STATE)
            self.LogInfo("Chose IP %s from network %s",
                         new_ip,
                         new_net_obj.name)
            params[constants.INIC_IP] = new_ip
          else:
            raise errors.OpPrereqError("ip=pool, but no network found",
                                       errors.ECODE_INVAL)
        # Reserve the new IP in the new network, if any
        elif new_net_uuid:
          try:
            self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId())
            self.LogInfo("Reserving IP %s in network %s",
                         new_ip, new_net_obj.name)
          except errors.ReservationError:
            raise errors.OpPrereqError("IP %s not available in network %s" %
                                       (new_ip, new_net_obj.name),
                                       errors.ECODE_NOTUNIQUE)
        # new network is None so check if new IP is a conflicting IP
        elif self.op.conflicts_check:
          _CheckForConflictingIp(self, new_ip, pnode_uuid)

      # release old IP if old network is not None
      if old_ip and old_net_uuid:
        try:
          self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId())
        except errors.AddressPoolError:
          logging.warning("Release IP %s not contained in network %s",
                          old_ip, old_net_obj.name)

    # there are no changes in (ip, network) tuple and old network is not None
    elif (old_net_uuid is not None and
          (req_link is not None or req_mode is not None)):
      raise errors.OpPrereqError("Not allowed to change link or mode of"
                                 " a NIC that is connected to a network",
                                 errors.ECODE_INVAL)

    private.params = new_params
    private.filled = new_filled_params

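  # Illustrative note (added): the NIC changes handled by
  # _PrepareNicModification arrive as (action, identifier, params) items in
  # self.op.nics, e.g. roughly
  #   (constants.DDM_MODIFY, 0, {constants.INIC_LINK: "br100"})
  # for changing the link of the first NIC; the authoritative
  # container-modification format is defined by the opcode and
  # _PrepareContainerMods, not here.
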
  def _PreCheckDiskTemplate(self, pnode_info):
    """CheckPrereq checks related to a new disk template."""
    # Arguments are passed to avoid configuration lookups
    pnode_uuid = self.instance.primary_node
    if self.instance.disk_template == self.op.disk_template:
      raise errors.OpPrereqError("Instance already has disk template %s" %
                                 self.instance.disk_template,
                                 errors.ECODE_INVAL)

    if not self.cluster.IsDiskTemplateEnabled(self.op.disk_template):
      raise errors.OpPrereqError("Disk template '%s' is not enabled for this"
                                 " cluster." % self.op.disk_template)

    if (self.instance.disk_template,
        self.op.disk_template) not in self._DISK_CONVERSIONS:
      raise errors.OpPrereqError("Unsupported disk template conversion from"
                                 " %s to %s" % (self.instance.disk_template,
                                                self.op.disk_template),
                                 errors.ECODE_INVAL)
    CheckInstanceState(self, self.instance, INSTANCE_DOWN,
                       msg="cannot change disk template")
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.remote_node_uuid == pnode_uuid:
        raise errors.OpPrereqError("Given new secondary node %s is the same"
                                   " as the primary node of the instance" %
                                   self.op.remote_node, errors.ECODE_STATE)
      CheckNodeOnline(self, self.op.remote_node_uuid)
      CheckNodeNotDrained(self, self.op.remote_node_uuid)
      # FIXME: here we assume that the old instance type is DT_PLAIN
      assert self.instance.disk_template == constants.DT_PLAIN
      disks = [{constants.IDISK_SIZE: d.size,
                constants.IDISK_VG: d.logical_id[0]}
               for d in self.instance.disks]
      required = ComputeDiskSizePerVG(self.op.disk_template, disks)
      CheckNodesFreeDiskPerVG(self, [self.op.remote_node_uuid], required)

      snode_info = self.cfg.GetNodeInfo(self.op.remote_node_uuid)
      snode_group = self.cfg.GetNodeGroup(snode_info.group)
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
                                                              snode_group)
      CheckTargetNodeIPolicy(self, ipolicy, self.instance, snode_info, self.cfg,
                             ignore=self.op.ignore_ipolicy)
      if pnode_info.group != snode_info.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    if self.op.disk_template not in constants.DTS_EXCL_STORAGE:
      # Make sure none of the nodes require exclusive storage
      nodes = [pnode_info]
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        assert snode_info
        nodes.append(snode_info)
      has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
      if compat.any(map(has_es, nodes)):
        errmsg = ("Cannot convert disk template from %s to %s when exclusive"
                  " storage is enabled" % (self.instance.disk_template,
                                           self.op.disk_template))
        raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)

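  # Example (added for clarity, not in the original file): the checks above
  # guard the plain <-> drbd conversions driven by something like
  #   gnt-instance modify -t drbd -n node2.example.com instance1.example.com
  # where -n names the new secondary node required by DTS_INT_MIRROR
  # templates; consult the gnt-instance man page of your Ganeti version for
  # the exact syntax.
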
  def _PreCheckDisks(self, ispec):
    """CheckPrereq checks related to disk changes.

    @type ispec: dict
    @param ispec: instance specs to be updated with the new disks

    """
    self.diskparams = self.cfg.GetInstanceDiskParams(self.instance)

    excl_stor = compat.any(
      rpc.GetExclusiveStorageForNodes(self.cfg,
                                      self.instance.all_nodes).values()
      )

    # Check disk modifications. This is done here and not in CheckArguments
    # (as with NICs), because we need to know the instance's disk template
    ver_fn = lambda op, par: self._VerifyDiskModification(op, par, excl_stor)
    if self.instance.disk_template == constants.DT_EXT:
      self._CheckMods("disk", self.op.disks, {}, ver_fn)
    else:
      self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
                      ver_fn)

    self.diskmod = _PrepareContainerMods(self.op.disks, None)

    # Check the validity of the `provider' parameter
    if self.instance.disk_template == constants.DT_EXT:
      for mod in self.diskmod:
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
        if mod[0] == constants.DDM_ADD:
          if ext_provider is None:
            raise errors.OpPrereqError("Instance template is '%s' and parameter"
                                       " '%s' missing, during disk add" %
                                       (constants.DT_EXT,
                                        constants.IDISK_PROVIDER),
                                       errors.ECODE_NOENT)
        elif mod[0] == constants.DDM_MODIFY:
          if ext_provider:
            raise errors.OpPrereqError("Parameter '%s' is invalid during disk"
                                       " modification" %
                                       constants.IDISK_PROVIDER,
                                       errors.ECODE_INVAL)
    else:
      for mod in self.diskmod:
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
        if ext_provider is not None:
          raise errors.OpPrereqError("Parameter '%s' is only valid for"
                                     " instances of type '%s'" %
                                     (constants.IDISK_PROVIDER,
                                      constants.DT_EXT),
                                     errors.ECODE_INVAL)

    if self.op.disks and self.instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Disk operations not supported for"
                                 " diskless instances", errors.ECODE_INVAL)

    def _PrepareDiskMod(_, disk, params, __):
      disk.name = params.get(constants.IDISK_NAME, None)

    # Verify disk changes (operating on a copy)
    disks = copy.deepcopy(self.instance.disks)
    _ApplyContainerMods("disk", disks, None, self.diskmod, None,
                        _PrepareDiskMod, None)
    utils.ValidateDeviceNames("disk", disks)
    if len(disks) > constants.MAX_DISKS:
      raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
                                 " more" % constants.MAX_DISKS,
                                 errors.ECODE_STATE)
    disk_sizes = [disk.size for disk in self.instance.disks]
    disk_sizes.extend(params["size"] for (op, idx, params, private) in
                      self.diskmod if op == constants.DDM_ADD)
    ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
    ispec[constants.ISPEC_DISK_SIZE] = disk_sizes

    if self.op.offline is not None and self.op.offline:
      CheckInstanceState(self, self.instance, CAN_CHANGE_INSTANCE_OFFLINE,
                         msg="can't change to offline")

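  # Example (added): a disk addition checked by _PreCheckDisks is typically
  # requested as
  #   gnt-instance modify --disk add:size=2G instance1.example.com
  # which reaches this code as, roughly, a (constants.DDM_ADD, <index>,
  # {"size": ...}) entry in self.op.disks (illustrative; see the opcode
  # definition for the authoritative format).
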
  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the instance list against the existing names.

    """
    assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    self.cluster = self.cfg.GetClusterInfo()

    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    pnode_uuid = self.instance.primary_node

    self.warn = []

    if (self.op.pnode_uuid is not None and self.op.pnode_uuid != pnode_uuid and
        not self.op.force):
      # verify that the instance is not up
      instance_info = self.rpc.call_instance_info(
          pnode_uuid, self.instance.name, self.instance.hypervisor,
          self.instance.hvparams)
      if instance_info.fail_msg:
        self.warn.append("Can't get instance runtime information: %s" %
                         instance_info.fail_msg)
      elif instance_info.payload:
        raise errors.OpPrereqError("Instance is still running on %s" %
                                   self.cfg.GetNodeName(pnode_uuid),
                                   errors.ECODE_STATE)

    assert pnode_uuid in self.owned_locks(locking.LEVEL_NODE)
    node_uuids = list(self.instance.all_nodes)
    pnode_info = self.cfg.GetNodeInfo(pnode_uuid)

    #_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
    assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
    group_info = self.cfg.GetNodeGroup(pnode_info.group)

    # dictionary with instance information after the modification
    ispec = {}

    # Prepare NIC modifications
    self.nicmod = _PrepareContainerMods(self.op.nics, _InstNicModPrivate)

    # OS change
    if self.op.os_name and not self.op.force:
      CheckNodeHasOS(self, self.instance.primary_node, self.op.os_name,
                     self.op.force_variant)
      instance_os = self.op.os_name
    else:
      instance_os = self.instance.os

    assert not (self.op.disk_template and self.op.disks), \
      "Can't modify disk template and apply disk changes at the same time"

    if self.op.disk_template:
      self._PreCheckDiskTemplate(pnode_info)

    self._PreCheckDisks(ispec)

    # hvparams processing
    if self.op.hvparams:
      hv_type = self.instance.hypervisor
      i_hvdict = GetUpdatedParams(self.instance.hvparams, self.op.hvparams)
      utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
      hv_new = self.cluster.SimpleFillHV(hv_type, self.instance.os, i_hvdict)

      # local check
      hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
      CheckHVParams(self, node_uuids, self.instance.hypervisor, hv_new)
      self.hv_proposed = self.hv_new = hv_new # the new actual values
      self.hv_inst = i_hvdict # the new dict (without defaults)
    else:
      self.hv_proposed = self.cluster.SimpleFillHV(self.instance.hypervisor,
                                                   self.instance.os,
                                                   self.instance.hvparams)
      self.hv_new = self.hv_inst = {}

    # beparams processing
    if self.op.beparams:
      i_bedict = GetUpdatedParams(self.instance.beparams, self.op.beparams,
                                  use_none=True)
      objects.UpgradeBeParams(i_bedict)
      utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
      be_new = self.cluster.SimpleFillBE(i_bedict)
      self.be_proposed = self.be_new = be_new # the new actual values
      self.be_inst = i_bedict # the new dict (without defaults)
    else:
      self.be_new = self.be_inst = {}
      self.be_proposed = self.cluster.SimpleFillBE(self.instance.beparams)
    be_old = self.cluster.FillBE(self.instance)

    # CPU param validation -- checking every time a parameter is
    # changed to cover all cases where either CPU mask or vcpus have
    # changed
    if (constants.BE_VCPUS in self.be_proposed and
        constants.HV_CPU_MASK in self.hv_proposed):
      cpu_list = \
        utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
      # Verify mask is consistent with number of vCPUs. Can skip this
      # test if only 1 entry in the CPU mask, which means same mask
      # is applied to all vCPUs.
      if (len(cpu_list) > 1 and
          len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
        raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
                                   " CPU mask [%s]" %
                                   (self.be_proposed[constants.BE_VCPUS],
                                    self.hv_proposed[constants.HV_CPU_MASK]),
                                   errors.ECODE_INVAL)

      # Only perform this test if a new CPU mask is given
      if constants.HV_CPU_MASK in self.hv_new:
        # Calculate the largest CPU number requested
        max_requested_cpu = max(map(max, cpu_list))
        # Check that all of the instance's nodes have enough physical CPUs to
        # satisfy the requested CPU mask
        hvspecs = [(self.instance.hypervisor,
                    self.cfg.GetClusterInfo()
                      .hvparams[self.instance.hypervisor])]
        _CheckNodesPhysicalCPUs(self, self.instance.all_nodes,
                                max_requested_cpu + 1,
                                hvspecs)

    # osparams processing
    if self.op.osparams:
      i_osdict = GetUpdatedParams(self.instance.osparams, self.op.osparams)
      CheckOSParams(self, True, node_uuids, instance_os, i_osdict)
      self.os_inst = i_osdict # the new dict (without defaults)
    else:
      self.os_inst = {}

    #TODO(dynmem): do the appropriate check involving MINMEM
    if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
        be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
      mem_check_list = [pnode_uuid]
      if be_new[constants.BE_AUTO_BALANCE]:
        # either we changed auto_balance to yes or it was from before
        mem_check_list.extend(self.instance.secondary_nodes)
      instance_info = self.rpc.call_instance_info(
          pnode_uuid, self.instance.name, self.instance.hypervisor,
          self.instance.hvparams)
      hvspecs = [(self.instance.hypervisor,
                  self.cluster.hvparams[self.instance.hypervisor])]
      nodeinfo = self.rpc.call_node_info(mem_check_list, None,
                                         hvspecs)
      pninfo = nodeinfo[pnode_uuid]
      msg = pninfo.fail_msg
      if msg:
        # Assume the primary node is unreachable and go ahead
        self.warn.append("Can't get info from primary node %s: %s" %
                         (self.cfg.GetNodeName(pnode_uuid), msg))
      else:
        (_, _, (pnhvinfo, )) = pninfo.payload
        if not isinstance(pnhvinfo.get("memory_free", None), int):
          self.warn.append("Node data from primary node %s doesn't contain"
                           " free memory information" %
                           self.cfg.GetNodeName(pnode_uuid))
        elif instance_info.fail_msg:
          self.warn.append("Can't get instance runtime information: %s" %
                           instance_info.fail_msg)
        else:
          if instance_info.payload:
            current_mem = int(instance_info.payload["memory"])
          else:
            # Assume instance not running
            # (there is a slight race condition here, but it's not very
            # probable, and we have no other way to check)
            # TODO: Describe race condition
            current_mem = 0
          #TODO(dynmem): do the appropriate check involving MINMEM
          miss_mem = (be_new[constants.BE_MAXMEM] - current_mem -
                      pnhvinfo["memory_free"])
          if miss_mem > 0:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from starting, due to %d MB of memory"
                                       " missing on its primary node" %
                                       miss_mem, errors.ECODE_NORES)

      if be_new[constants.BE_AUTO_BALANCE]:
        for node_uuid, nres in nodeinfo.items():
          if node_uuid not in self.instance.secondary_nodes:
            continue
          nres.Raise("Can't get info from secondary node %s" %
                     self.cfg.GetNodeName(node_uuid), prereq=True,
                     ecode=errors.ECODE_STATE)
          (_, _, (nhvinfo, )) = nres.payload
          if not isinstance(nhvinfo.get("memory_free", None), int):
            raise errors.OpPrereqError("Secondary node %s didn't return free"
                                       " memory information" %
                                       self.cfg.GetNodeName(node_uuid),
                                       errors.ECODE_STATE)
          #TODO(dynmem): do the appropriate check involving MINMEM
          elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from failing over to its secondary"
                                       " node %s, due to not enough memory" %
                                       self.cfg.GetNodeName(node_uuid),
                                       errors.ECODE_STATE)

    if self.op.runtime_mem:
      remote_info = self.rpc.call_instance_info(
         self.instance.primary_node, self.instance.name,
         self.instance.hypervisor,
         self.cluster.hvparams[self.instance.hypervisor])
      remote_info.Raise("Error checking node %s" %
                        self.cfg.GetNodeName(self.instance.primary_node))
      if not remote_info.payload: # not running already
        raise errors.OpPrereqError("Instance %s is not running" %
                                   self.instance.name, errors.ECODE_STATE)

      current_memory = remote_info.payload["memory"]
      if (not self.op.force and
           (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
            self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
        raise errors.OpPrereqError("Instance %s must have between %d and %d MB"
                                   " of memory unless --force is given" %
                                   (self.instance.name,
                                    self.be_proposed[constants.BE_MINMEM],
                                    self.be_proposed[constants.BE_MAXMEM]),
                                   errors.ECODE_INVAL)

      delta = self.op.runtime_mem - current_memory
      if delta > 0:
        CheckNodeFreeMemory(
            self, self.instance.primary_node,
            "ballooning memory for instance %s" % self.instance.name, delta,
            self.instance.hypervisor,
            self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])

    # make self.cluster visible in the functions below
    cluster = self.cluster

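    # Note (added): the three helpers below are the create/modify/remove
    # callbacks handed to _ApplyContainerMods for NICs; they only validate
    # and reserve resources here in CheckPrereq, e.g. a modification of
    # NIC 0's IP would go through _PrepareNicMod with params roughly like
    # {constants.INIC_IP: "192.0.2.10"} (illustrative values).
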
    def _PrepareNicCreate(_, params, private):
      self._PrepareNicModification(params, private, None, None,
                                   {}, cluster, pnode_uuid)
      return (None, None)

    def _PrepareNicMod(_, nic, params, private):
      self._PrepareNicModification(params, private, nic.ip, nic.network,
                                   nic.nicparams, cluster, pnode_uuid)
      return None

    def _PrepareNicRemove(_, params, __):
      ip = params.ip
      net = params.network
      if net is not None and ip is not None:
        self.cfg.ReleaseIp(net, ip, self.proc.GetECId())

    # Verify NIC changes (operating on copy)
    nics = self.instance.nics[:]
    _ApplyContainerMods("NIC", nics, None, self.nicmod,
                        _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
    if len(nics) > constants.MAX_NICS:
      raise errors.OpPrereqError("Instance has too many network interfaces"
                                 " (%d), cannot add more" % constants.MAX_NICS,
                                 errors.ECODE_STATE)

    # Pre-compute NIC changes (necessary to use result in hooks)
    self._nic_chgdesc = []
    if self.nicmod:
      # Operate on copies as this is still in prereq
      nics = [nic.Copy() for nic in self.instance.nics]
      _ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
                          self._CreateNewNic, self._ApplyNicMods, None)
      # Verify that NIC names are unique and valid
      utils.ValidateDeviceNames("NIC", nics)
      self._new_nics = nics
      ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
    else:
      self._new_nics = None
      ispec[constants.ISPEC_NIC_COUNT] = len(self.instance.nics)

    if not self.op.ignore_ipolicy:
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
                                                              group_info)

      # Fill ispec with backend parameters
      ispec[constants.ISPEC_SPINDLE_USE] = \
        self.be_new.get(constants.BE_SPINDLE_USE, None)
      ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS,
                                                         None)

      # Copy ispec to verify parameters with min/max values separately
      if self.op.disk_template:
        new_disk_template = self.op.disk_template
      else:
        new_disk_template = self.instance.disk_template
      ispec_max = ispec.copy()
      ispec_max[constants.ISPEC_MEM_SIZE] = \
        self.be_new.get(constants.BE_MAXMEM, None)
      res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,
                                                     new_disk_template)
      ispec_min = ispec.copy()
      ispec_min[constants.ISPEC_MEM_SIZE] = \
        self.be_new.get(constants.BE_MINMEM, None)
      res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,
                                                     new_disk_template)

      if (res_max or res_min):
        # FIXME: Improve error message by including information about whether
        # the upper or lower limit of the parameter fails the ipolicy.
        msg = ("Instance allocation to group %s (%s) violates policy: %s" %
               (group_info, group_info.name,
                utils.CommaJoin(set(res_max + res_min))))
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

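  # Note (added): ispec is checked twice above, once with BE_MAXMEM and once
  # with BE_MINMEM as ISPEC_MEM_SIZE, so that both ends of the configured
  # memory range are validated against the group's instance policy; e.g. a
  # request for maxmem=32768 on a group whose policy caps memory at 16384
  # would be rejected here (numbers are only an example).
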
  def _ConvertPlainToDrbd(self, feedback_fn):
    """Converts an instance from plain to drbd.

    """
    feedback_fn("Converting template to drbd")
    pnode_uuid = self.instance.primary_node
    snode_uuid = self.op.remote_node_uuid

    assert self.instance.disk_template == constants.DT_PLAIN

    # create a fake disk info for _GenerateDiskTemplate
    disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
                  constants.IDISK_VG: d.logical_id[0],
                  constants.IDISK_NAME: d.name}
                 for d in self.instance.disks]
    new_disks = GenerateDiskTemplate(self, self.op.disk_template,
                                     self.instance.uuid, pnode_uuid,
                                     [snode_uuid], disk_info, None, None, 0,
                                     feedback_fn, self.diskparams)
    anno_disks = rpc.AnnotateDiskParams(constants.DT_DRBD8, new_disks,
                                        self.diskparams)
    p_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, pnode_uuid)
    s_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, snode_uuid)
    info = GetInstanceInfoText(self.instance)
    feedback_fn("Creating additional volumes...")
    # first, create the missing data and meta devices
    for disk in anno_disks:
      # unfortunately this is... not too nice
      CreateSingleBlockDev(self, pnode_uuid, self.instance, disk.children[1],
                           info, True, p_excl_stor)
      for child in disk.children:
        CreateSingleBlockDev(self, snode_uuid, self.instance, child, info, True,
                             s_excl_stor)
    # at this stage, all new LVs have been created, we can rename the
    # old ones
    feedback_fn("Renaming original volumes...")
    rename_list = [(o, n.children[0].logical_id)
                   for (o, n) in zip(self.instance.disks, new_disks)]
    result = self.rpc.call_blockdev_rename(pnode_uuid, rename_list)
    result.Raise("Failed to rename original LVs")

    feedback_fn("Initializing DRBD devices...")
    # all child devices are in place, we can now create the DRBD devices
    try:
      for disk in anno_disks:
        for (node_uuid, excl_stor) in [(pnode_uuid, p_excl_stor),
                                       (snode_uuid, s_excl_stor)]:
          f_create = node_uuid == pnode_uuid
          CreateSingleBlockDev(self, node_uuid, self.instance, disk, info,
                               f_create, excl_stor)
    except errors.GenericError, e:
      feedback_fn("Initialization of DRBD devices failed;"
                  " renaming back original volumes...")
      for disk in new_disks:
        self.cfg.SetDiskID(disk, pnode_uuid)
      rename_back_list = [(n.children[0], o.logical_id)
                          for (n, o) in zip(new_disks, self.instance.disks)]
      result = self.rpc.call_blockdev_rename(pnode_uuid, rename_back_list)
      result.Raise("Failed to rename LVs back after error %s" % str(e))
      raise

    # at this point, the instance has been modified
    self.instance.disk_template = constants.DT_DRBD8
    self.instance.disks = new_disks
    self.cfg.Update(self.instance, feedback_fn)

    # Release node locks while waiting for sync
    ReleaseLocks(self, locking.LEVEL_NODE)

    # disks are created, waiting for sync
    disk_abort = not WaitForSync(self, self.instance,
                                 oneshot=not self.op.wait_for_sync)
    if disk_abort:
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance, please cleanup manually")

    # Node resource locks will be released by caller

  def _ConvertDrbdToPlain(self, feedback_fn):
    """Converts an instance from drbd to plain.

    """
    assert len(self.instance.secondary_nodes) == 1
    assert self.instance.disk_template == constants.DT_DRBD8

    pnode_uuid = self.instance.primary_node
    snode_uuid = self.instance.secondary_nodes[0]
    feedback_fn("Converting template to plain")

    old_disks = AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
    new_disks = [d.children[0] for d in self.instance.disks]

    # copy over size, mode and name
    for parent, child in zip(old_disks, new_disks):
      child.size = parent.size
      child.mode = parent.mode
      child.name = parent.name

    # this is a DRBD disk, return its port to the pool
    # NOTE: this must be done right before the call to cfg.Update!
    for disk in old_disks:
      tcp_port = disk.logical_id[2]
      self.cfg.AddTcpUdpPort(tcp_port)

    # update instance structure
    self.instance.disks = new_disks
    self.instance.disk_template = constants.DT_PLAIN
    _UpdateIvNames(0, self.instance.disks)
    self.cfg.Update(self.instance, feedback_fn)

    # Release locks in case removing disks takes a while
    ReleaseLocks(self, locking.LEVEL_NODE)

    feedback_fn("Removing volumes on the secondary node...")
    for disk in old_disks:
      self.cfg.SetDiskID(disk, snode_uuid)
      msg = self.rpc.call_blockdev_remove(snode_uuid, disk).fail_msg
      if msg:
        self.LogWarning("Could not remove block device %s on node %s,"
                        " continuing anyway: %s", disk.iv_name,
                        self.cfg.GetNodeName(snode_uuid), msg)

    feedback_fn("Removing unneeded volumes on the primary node...")
    for idx, disk in enumerate(old_disks):
      meta = disk.children[1]
      self.cfg.SetDiskID(meta, pnode_uuid)
      msg = self.rpc.call_blockdev_remove(pnode_uuid, meta).fail_msg
      if msg:
        self.LogWarning("Could not remove metadata for disk %d on node %s,"
                        " continuing anyway: %s", idx,
                        self.cfg.GetNodeName(pnode_uuid), msg)

  def _CreateNewDisk(self, idx, params, _):
    """Creates a new disk.

    """
    # add a new disk
    if self.instance.disk_template in constants.DTS_FILEBASED:
      (file_driver, file_path) = self.instance.disks[0].logical_id
      file_path = os.path.dirname(file_path)
    else:
      file_driver = file_path = None

    disk = \
      GenerateDiskTemplate(self, self.instance.disk_template,
                           self.instance.uuid, self.instance.primary_node,
                           self.instance.secondary_nodes, [params], file_path,
                           file_driver, idx, self.Log, self.diskparams)[0]

    new_disks = CreateDisks(self, self.instance, disks=[disk])

    if self.cluster.prealloc_wipe_disks:
      # Wipe new disk
      WipeOrCleanupDisks(self, self.instance,
                         disks=[(idx, disk, 0)],
                         cleanup=new_disks)

    return (disk, [
      ("disk/%d" % idx, "add:size=%s,mode=%s" % (disk.size, disk.mode)),
      ])

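  # Note (added): the ("disk/%d", "add:size=...,mode=...") pair returned
  # above is a change descriptor; _ApplyContainerMods collects these into the
  # list returned by Exec, so a 1 GiB addition shows up as, roughly,
  #   ("disk/1", "add:size=1024,mode=rw")
  # (example values only; sizes are in MiB).
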
  def _ModifyDisk(self, idx, disk, params, _):
    """Modifies a disk.

    """
    changes = []
    if constants.IDISK_MODE in params:
      disk.mode = params.get(constants.IDISK_MODE)
      changes.append(("disk.mode/%d" % idx, disk.mode))

    if constants.IDISK_NAME in params:
      disk.name = params.get(constants.IDISK_NAME)
      changes.append(("disk.name/%d" % idx, disk.name))

    # Modify arbitrary params in case instance template is ext
    for key, value in params.iteritems():
      if (key not in constants.MODIFIABLE_IDISK_PARAMS and
          self.instance.disk_template == constants.DT_EXT):
        # stolen from GetUpdatedParams: default means reset/delete
        if value.lower() == constants.VALUE_DEFAULT:
          try:
            del disk.params[key]
          except KeyError:
            pass
        else:
          disk.params[key] = value
        changes.append(("disk.params:%s/%d" % (key, idx), value))

    return changes

  def _RemoveDisk(self, idx, root, _):
    """Removes a disk.

    """
    (anno_disk,) = AnnotateDiskParams(self.instance, [root], self.cfg)
    for node_uuid, disk in anno_disk.ComputeNodeTree(
                             self.instance.primary_node):
      self.cfg.SetDiskID(disk, node_uuid)
      msg = self.rpc.call_blockdev_remove(node_uuid, disk).fail_msg
      if msg:
        self.LogWarning("Could not remove disk/%d on node '%s': %s,"
                        " continuing anyway", idx,
                        self.cfg.GetNodeName(node_uuid), msg)

    # if this is a DRBD disk, return its port to the pool
    if root.dev_type in constants.DTS_DRBD:
      self.cfg.AddTcpUdpPort(root.logical_id[2])

  def _CreateNewNic(self, idx, params, private):
    """Creates data structure for a new network interface.

    """
    mac = params[constants.INIC_MAC]
    ip = params.get(constants.INIC_IP, None)
    net = params.get(constants.INIC_NETWORK, None)
    name = params.get(constants.INIC_NAME, None)
    net_uuid = self.cfg.LookupNetwork(net)
    #TODO: not private.filled?? can a nic have no nicparams??
    nicparams = private.filled
    nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name,
                       nicparams=nicparams)
    nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())

    return (nobj, [
      ("nic.%d" % idx,
       "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
       (mac, ip, private.filled[constants.NIC_MODE],
       private.filled[constants.NIC_LINK],
       net)),
      ])

  def _ApplyNicMods(self, idx, nic, params, private):
    """Modifies a network interface.

    """
    changes = []

    for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:
      if key in params:
        changes.append(("nic.%s/%d" % (key, idx), params[key]))
        setattr(nic, key, params[key])

    new_net = params.get(constants.INIC_NETWORK, nic.network)
    new_net_uuid = self.cfg.LookupNetwork(new_net)
    if new_net_uuid != nic.network:
      changes.append(("nic.network/%d" % idx, new_net))
      nic.network = new_net_uuid

    if private.filled:
      nic.nicparams = private.filled

      for (key, val) in nic.nicparams.items():
        changes.append(("nic.%s/%d" % (key, idx), val))

    return changes

  def Exec(self, feedback_fn):
    """Modifies an instance.

    All parameters take effect only at the next restart of the instance.

    """
    # Process here the warnings from CheckPrereq, as we don't have a
    # feedback_fn there.
    # TODO: Replace with self.LogWarning
    for warn in self.warn:
      feedback_fn("WARNING: %s" % warn)

    assert ((self.op.disk_template is None) ^
            bool(self.owned_locks(locking.LEVEL_NODE_RES))), \
      "Not owning any node resource locks"

    result = []

    # New primary node
    if self.op.pnode_uuid:
      self.instance.primary_node = self.op.pnode_uuid

    # runtime memory
    if self.op.runtime_mem:
      rpcres = self.rpc.call_instance_balloon_memory(self.instance.primary_node,
                                                     self.instance,
                                                     self.op.runtime_mem)
      rpcres.Raise("Cannot modify instance runtime memory")
      result.append(("runtime_memory", self.op.runtime_mem))

    # Apply disk changes
    _ApplyContainerMods("disk", self.instance.disks, result, self.diskmod,
                        self._CreateNewDisk, self._ModifyDisk,
                        self._RemoveDisk)
    _UpdateIvNames(0, self.instance.disks)

    if self.op.disk_template:
      if __debug__:
        check_nodes = set(self.instance.all_nodes)
        if self.op.remote_node_uuid:
          check_nodes.add(self.op.remote_node_uuid)
        for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
          owned = self.owned_locks(level)
          assert not (check_nodes - owned), \
            ("Not owning the correct locks, owning %r, expected at least %r" %
             (owned, check_nodes))

      r_shut = ShutdownInstanceDisks(self, self.instance)
      if not r_shut:
        raise errors.OpExecError("Cannot shutdown instance disks, unable to"
                                 " proceed with disk template conversion")
      mode = (self.instance.disk_template, self.op.disk_template)
      try:
        self._DISK_CONVERSIONS[mode](self, feedback_fn)
      except:
        self.cfg.ReleaseDRBDMinors(self.instance.uuid)
        raise
      result.append(("disk_template", self.op.disk_template))

      assert self.instance.disk_template == self.op.disk_template, \
        ("Expected disk template '%s', found '%s'" %
         (self.op.disk_template, self.instance.disk_template))

    # Release node and resource locks if there are any (they might already have
    # been released during disk conversion)
    ReleaseLocks(self, locking.LEVEL_NODE)
    ReleaseLocks(self, locking.LEVEL_NODE_RES)

    # Apply NIC changes
    if self._new_nics is not None:
      self.instance.nics = self._new_nics
      result.extend(self._nic_chgdesc)

    # hvparams changes
    if self.op.hvparams:
      self.instance.hvparams = self.hv_inst
      for key, val in self.op.hvparams.iteritems():
        result.append(("hv/%s" % key, val))

    # beparams changes
    if self.op.beparams:
      self.instance.beparams = self.be_inst
      for key, val in self.op.beparams.iteritems():
        result.append(("be/%s" % key, val))

    # OS change
    if self.op.os_name:
      self.instance.os = self.op.os_name

    # osparams changes
    if self.op.osparams:
      self.instance.osparams = self.os_inst
      for key, val in self.op.osparams.iteritems():
        result.append(("os/%s" % key, val))

    if self.op.offline is None:
      # Ignore
      pass
    elif self.op.offline:
      # Mark instance as offline
      self.cfg.MarkInstanceOffline(self.instance.uuid)
      result.append(("admin_state", constants.ADMINST_OFFLINE))
    else:
      # Mark instance as online, but stopped
      self.cfg.MarkInstanceDown(self.instance.uuid)
      result.append(("admin_state", constants.ADMINST_DOWN))

    self.cfg.Update(self.instance, feedback_fn, self.proc.GetECId())

    assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
                self.owned_locks(locking.LEVEL_NODE)), \
      "All node locks should have been released by now"

    return result

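  # Note (added): _DISK_CONVERSIONS below is the dispatch table used by Exec;
  # the key is the (current template, requested template) pair and the value
  # is the conversion method, so a plain -> drbd request resolves to
  # _ConvertPlainToDrbd via
  #   self._DISK_CONVERSIONS[(constants.DT_PLAIN, constants.DT_DRBD8)]
  # Only the pairs listed here are accepted by _PreCheckDiskTemplate.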
  _DISK_CONVERSIONS = {
    (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
    (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
    }


class LUInstanceChangeGroup(LogicalUnit):
  HPATH = "instance-change-group"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self.share_locks = ShareAll()

    self.needed_locks = {
      locking.LEVEL_NODEGROUP: [],
      locking.LEVEL_NODE: [],
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
      }

    self._ExpandAndLockInstance()

    if self.op.target_groups:
      self.req_target_uuids = map(self.cfg.LookupNodeGroup,
                                  self.op.target_groups)
    else:
      self.req_target_uuids = None

    self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]

      if self.req_target_uuids:
        lock_groups = set(self.req_target_uuids)

        # Lock all groups used by the instance optimistically; this requires
        # going via the node before it's locked, requiring verification later on
        instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
        lock_groups.update(instance_groups)
      else:
        # No target groups, need to lock all of them
        lock_groups = locking.ALL_SET

      self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups

    elif level == locking.LEVEL_NODE:
      if self.req_target_uuids:
        # Lock all nodes used by the instance
        self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
        self._LockInstancesNodes()

        # Lock all nodes in all potential target groups
        lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
                       self.cfg.GetInstanceNodeGroups(self.op.instance_uuid))
        member_nodes = [node_uuid
                        for group in lock_groups
                        for node_uuid in self.cfg.GetNodeGroup(group).members]
        self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
      else:
        # Lock all nodes as all groups are potential targets
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    owned_instance_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))

    assert (self.req_target_uuids is None or
            owned_groups.issuperset(self.req_target_uuids))
    assert owned_instance_names == set([self.op.instance_name])

    # Get instance information
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)

    # Check if node groups for locked instance are still correct
    assert owned_nodes.issuperset(self.instance.all_nodes), \
      ("Instance %s's nodes changed while we kept the lock" %
       self.op.instance_name)

    inst_groups = CheckInstanceNodeGroups(self.cfg, self.op.instance_uuid,
                                          owned_groups)

    if self.req_target_uuids:
      # User requested specific target groups
      self.target_uuids = frozenset(self.req_target_uuids)
    else:
      # All groups except those used by the instance are potential targets
      self.target_uuids = owned_groups - inst_groups

    conflicting_groups = self.target_uuids & inst_groups
    if conflicting_groups:
      raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
                                 " used by the instance '%s'" %
                                 (utils.CommaJoin(conflicting_groups),
                                  self.op.instance_name),
                                 errors.ECODE_INVAL)

    if not self.target_uuids:
      raise errors.OpPrereqError("There are no possible target groups",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    assert self.target_uuids

    env = {
      "TARGET_GROUPS": " ".join(self.target_uuids),
      }

    env.update(BuildInstanceHookEnvByObject(self, self.instance))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()
    return ([mn], [mn])

  def Exec(self, feedback_fn):
    instances = list(self.owned_locks(locking.LEVEL_INSTANCE))

    assert instances == [self.op.instance_name], "Instance not locked"

    req = iallocator.IAReqGroupChange(instances=instances,
                                      target_groups=list(self.target_uuids))
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute solution for changing group of"
                                 " instance '%s' using iallocator '%s': %s" %
                                 (self.op.instance_name, self.op.iallocator,
                                  ial.info), errors.ECODE_NORES)

    jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False)

    self.LogInfo("Iallocator returned %s job(s) for changing group of"
                 " instance '%s'", len(jobs), self.op.instance_name)

    return ResultWithJobs(jobs)
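
  # Example (added): this LU backs the "gnt-instance change-group" command,
  # e.g. roughly
  #   gnt-instance change-group --to other-group instance1.example.com
  # which asks the iallocator for a placement in one of the target groups and
  # returns the resulting evacuation jobs (see the man page of your Ganeti
  # version for the exact options).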