#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Logical units dealing with instances."""

import OpenSSL
import copy
import logging
import os

from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import ht
from ganeti import hypervisor
from ganeti import locking
from ganeti.masterd import iallocator
from ganeti import masterd
from ganeti import netutils
from ganeti import objects
from ganeti import opcodes
from ganeti import pathutils
from ganeti import rpc
from ganeti import utils

from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs

from ganeti.cmdlib.common import INSTANCE_DOWN, \
  INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, CheckNodeOnline, \
  ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
  LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
  IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
  AnnotateDiskParams, GetUpdatedParams, ExpandInstanceUuidAndName, \
  ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeUuidAndName, \
  CheckDiskTemplateEnabled
from ganeti.cmdlib.instance_storage import CreateDisks, \
  CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, WaitForSync, \
  IsExclusiveStorageEnabledNodeUuid, CreateSingleBlockDev, ComputeDisks, \
  CheckRADOSFreeSpace, ComputeDiskSizePerVG, GenerateDiskTemplate, \
  StartInstanceDisks, ShutdownInstanceDisks, AssembleInstanceDisks, \
  CheckSpindlesExclusiveStorage
from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
  GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
  NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
  ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
  GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
  CheckInstanceBridgesExist, CheckNicsBridgesExist, CheckNodeHasOS

import ganeti.masterd.instance


#: Type description for changes as returned by L{_ApplyContainerMods}'s
#: callbacks
_TApplyContModsCbChanges = \
  ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
    ht.TNonEmptyString,
    ht.TAny,
    ])))


def _CheckHostnameSane(lu, name):
  """Ensures that a given hostname resolves to a 'sane' name.

  The given name is required to be a prefix of the resolved hostname,
  to prevent accidental mismatches.

  @param lu: the logical unit on behalf of which we're checking
  @param name: the name we should resolve and check
  @return: the resolved hostname object

  """
  hostname = netutils.GetHostname(name=name)
  if hostname.name != name:
    lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name)
  if not utils.MatchNameComponent(name, [hostname.name]):
    raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
                                " same as given hostname '%s'") %
                               (hostname.name, name), errors.ECODE_INVAL)
  return hostname


def _CheckOpportunisticLocking(op):
  """Generate error if opportunistic locking is not possible.

  """
  if op.opportunistic_locking and not op.iallocator:
    raise errors.OpPrereqError("Opportunistic locking is only available in"
                               " combination with an instance allocator",
                               errors.ECODE_INVAL)


def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_name_whitelist):
  """Wrapper around IAReqInstanceAlloc.

  @param op: The instance opcode
  @param disks: The computed disks
  @param nics: The computed nics
  @param beparams: The fully filled beparams
  @param node_name_whitelist: List of nodes which should appear as online to the
    allocator (unless the node is already marked offline)

  @returns: A filled L{iallocator.IAReqInstanceAlloc}

  """
  spindle_use = beparams[constants.BE_SPINDLE_USE]
  return iallocator.IAReqInstanceAlloc(name=op.instance_name,
                                       disk_template=op.disk_template,
                                       tags=op.tags,
                                       os=op.os_type,
                                       vcpus=beparams[constants.BE_VCPUS],
                                       memory=beparams[constants.BE_MAXMEM],
                                       spindle_use=spindle_use,
                                       disks=disks,
                                       nics=[n.ToDict() for n in nics],
                                       hypervisor=op.hypervisor,
                                       node_whitelist=node_name_whitelist)


def _ComputeFullBeParams(op, cluster):
  """Computes the full beparams.

  @param op: The instance opcode
  @param cluster: The cluster config object

  @return: The fully filled beparams

  """
  default_beparams = cluster.beparams[constants.PP_DEFAULT]
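  # replace "auto" values with the cluster-wide default beparams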
  for param, value in op.beparams.iteritems():
    if value == constants.VALUE_AUTO:
      op.beparams[param] = default_beparams[param]
  objects.UpgradeBeParams(op.beparams)
  utils.ForceDictType(op.beparams, constants.BES_PARAMETER_TYPES)
  return cluster.SimpleFillBE(op.beparams)


def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
  """Computes the nics.

  @param op: The instance opcode
  @param cluster: Cluster configuration object
  @param default_ip: The default ip to assign
  @param cfg: An instance of the configuration object
  @param ec_id: Execution context ID

  @returns: The built up nics

  """
  nics = []
  for nic in op.nics:
    nic_mode_req = nic.get(constants.INIC_MODE, None)
    nic_mode = nic_mode_req
    if nic_mode is None or nic_mode == constants.VALUE_AUTO:
      nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]

    net = nic.get(constants.INIC_NETWORK, None)
    link = nic.get(constants.NIC_LINK, None)
    ip = nic.get(constants.INIC_IP, None)

    if net is None or net.lower() == constants.VALUE_NONE:
      net = None
    else:
      if nic_mode_req is not None or link is not None:
        raise errors.OpPrereqError("If network is given, no mode or link"
                                   " is allowed to be passed",
                                   errors.ECODE_INVAL)

    # ip validity checks
    if ip is None or ip.lower() == constants.VALUE_NONE:
      nic_ip = None
    elif ip.lower() == constants.VALUE_AUTO:
      if not op.name_check:
        raise errors.OpPrereqError("IP address set to auto but name checks"
                                   " have been skipped",
                                   errors.ECODE_INVAL)
      nic_ip = default_ip
    else:
      # We defer pool operations until later, so that the iallocator has
      # filled in the instance's node(s)
      if ip.lower() == constants.NIC_IP_POOL:
        if net is None:
          raise errors.OpPrereqError("if ip=pool, parameter network"
                                     " must be passed too",
                                     errors.ECODE_INVAL)

      elif not netutils.IPAddress.IsValid(ip):
        raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                   errors.ECODE_INVAL)

      nic_ip = ip

    # TODO: check the ip address for uniqueness
    if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
      raise errors.OpPrereqError("Routed nic mode requires an ip address",
                                 errors.ECODE_INVAL)

    # MAC address verification
    mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
    if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
      mac = utils.NormalizeAndValidateMac(mac)

      try:
        # TODO: We need to factor this out
        cfg.ReserveMAC(mac, ec_id)
      except errors.ReservationError:
        raise errors.OpPrereqError("MAC address %s already in use"
                                   " in cluster" % mac,
                                   errors.ECODE_NOTUNIQUE)

    #  Build nic parameters
    nicparams = {}
    if nic_mode_req:
      nicparams[constants.NIC_MODE] = nic_mode
    if link:
      nicparams[constants.NIC_LINK] = link

    check_params = cluster.SimpleFillNIC(nicparams)
    objects.NIC.CheckParameterSyntax(check_params)
    net_uuid = cfg.LookupNetwork(net)
    name = nic.get(constants.INIC_NAME, None)
    if name is not None and name.lower() == constants.VALUE_NONE:
      name = None
    nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name,
                          network=net_uuid, nicparams=nicparams)
    nic_obj.uuid = cfg.GenerateUniqueID(ec_id)
    nics.append(nic_obj)

  return nics


def _CheckForConflictingIp(lu, ip, node_uuid):
  """In case of conflicting IP address raise error.

  @type ip: string
  @param ip: IP address
  @type node_uuid: string
  @param node_uuid: node UUID

  """
  (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node_uuid)
  if conf_net is not None:
    raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
                                " network %s, but the target NIC does not." %
                                (ip, conf_net)),
                               errors.ECODE_STATE)

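  # no conflict: the IP does not belong to any network in the node's group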
  return (None, None)


def _ComputeIPolicyInstanceSpecViolation(
  ipolicy, instance_spec, disk_template,
  _compute_fn=ComputeIPolicySpecViolation):
  """Compute if instance specs meet the specs of ipolicy.

  @type ipolicy: dict
  @param ipolicy: The ipolicy to verify against
  @type instance_spec: dict
  @param instance_spec: The instance spec to verify
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{ComputeIPolicySpecViolation}

  """
  mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
  cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
  disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
  disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
  nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
  spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)

  return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
                     disk_sizes, spindle_use, disk_template)


def _CheckOSVariant(os_obj, name):
  """Check whether an OS name conforms to the os variants specification.

  @type os_obj: L{objects.OS}
  @param os_obj: OS object to check
  @type name: string
  @param name: OS name passed by the user, to check for validity

  """
  variant = objects.OS.GetVariant(name)
  if not os_obj.supported_variants:
    if variant:
      raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
                                 " passed)" % (os_obj.name, variant),
                                 errors.ECODE_INVAL)
    return
  if not variant:
    raise errors.OpPrereqError("OS name must include a variant",
                               errors.ECODE_INVAL)

  if variant not in os_obj.supported_variants:
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)


class LUInstanceCreate(LogicalUnit):
  """Create an instance.

  """
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def _CheckDiskTemplateValid(self):
    """Checks validity of disk template.

    """
    cluster = self.cfg.GetClusterInfo()
    if self.op.disk_template is None:
      # FIXME: It would be better to take the default disk template from the
      # ipolicy, but for the ipolicy we need the primary node, which we get from
      # the iallocator, which wants the disk template as input. To solve this
      # chicken-and-egg problem, it should be possible to specify just a node
      # group from the iallocator and take the ipolicy from that.
      self.op.disk_template = cluster.enabled_disk_templates[0]
    CheckDiskTemplateEnabled(cluster, self.op.disk_template)

  def _CheckDiskArguments(self):
    """Checks validity of disk-related arguments.

    """
    # check that disk's names are unique and valid
    utils.ValidateDeviceNames("disk", self.op.disks)

    self._CheckDiskTemplateValid()

    # check disks' parameter names and consistent adopt/no-adopt strategy
    has_adopt = has_no_adopt = False
    for disk in self.op.disks:
      if self.op.disk_template != constants.DT_EXT:
        utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
      if constants.IDISK_ADOPT in disk:
        has_adopt = True
      else:
        has_no_adopt = True
    if has_adopt and has_no_adopt:
      raise errors.OpPrereqError("Either all disks are adopted or none is",
                                 errors.ECODE_INVAL)
    if has_adopt:
      if self.op.disk_template not in constants.DTS_MAY_ADOPT:
        raise errors.OpPrereqError("Disk adoption is not supported for the"
                                   " '%s' disk template" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)
      if self.op.iallocator is not None:
        raise errors.OpPrereqError("Disk adoption not allowed with an"
                                   " iallocator script", errors.ECODE_INVAL)
      if self.op.mode == constants.INSTANCE_IMPORT:
        raise errors.OpPrereqError("Disk adoption not allowed for"
                                   " instance import", errors.ECODE_INVAL)
    else:
      if self.op.disk_template in constants.DTS_MUST_ADOPT:
        raise errors.OpPrereqError("Disk template %s requires disk adoption,"
                                   " but no 'adopt' parameter given" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)

    self.adopt_disks = has_adopt

  def CheckArguments(self):
    """Check arguments.

    """
    # do not require name_check to ease forward/backward compatibility
    # for tools
    if self.op.no_install and self.op.start:
      self.LogInfo("No-installation mode selected, disabling startup")
      self.op.start = False
    # validate/normalize the instance name
    self.op.instance_name = \
      netutils.Hostname.GetNormalizedName(self.op.instance_name)

    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("Cannot do IP address check without a name"
                                 " check", errors.ECODE_INVAL)

    # check nics' parameter names
    for nic in self.op.nics:
      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
    # check that NIC's parameters names are unique and valid
    utils.ValidateDeviceNames("NIC", self.op.nics)

    self._CheckDiskArguments()

    # instance name verification
    if self.op.name_check:
      self.hostname = _CheckHostnameSane(self, self.op.instance_name)
      self.op.instance_name = self.hostname.name
      # used in CheckPrereq for ip ping check
      self.check_ip = self.hostname.ip
    else:
      self.check_ip = None

    # file storage checks
    if (self.op.file_driver and
        not self.op.file_driver in constants.FILE_DRIVER):
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                 self.op.file_driver, errors.ECODE_INVAL)

    ### Node/iallocator related checks
    CheckIAllocatorOrNode(self, "iallocator", "pnode")

    if self.op.pnode is not None:
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        if self.op.snode is None:
          raise errors.OpPrereqError("The networked disk templates need"
                                     " a mirror node", errors.ECODE_INVAL)
      elif self.op.snode:
        self.LogWarning("Secondary node will be ignored on non-mirrored disk"
                        " template")
        self.op.snode = None

    _CheckOpportunisticLocking(self.op)

    self._cds = GetClusterDomainSecret()

    if self.op.mode == constants.INSTANCE_IMPORT:
      # On import force_variant must be True, because if we forced it at
      # initial install, our only chance when importing it back is that it
      # works again!
      self.op.force_variant = True

      if self.op.no_install:
        self.LogInfo("No-installation mode has no effect during import")

    elif self.op.mode == constants.INSTANCE_CREATE:
      if self.op.os_type is None:
        raise errors.OpPrereqError("No guest OS specified",
                                   errors.ECODE_INVAL)
      if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
        raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
                                   " installation" % self.op.os_type,
                                   errors.ECODE_STATE)
      if self.op.disk_template is None:
        raise errors.OpPrereqError("No disk template specified",
                                   errors.ECODE_INVAL)

    elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
      # Check handshake to ensure both clusters have the same domain secret
      src_handshake = self.op.source_handshake
      if not src_handshake:
        raise errors.OpPrereqError("Missing source handshake",
                                   errors.ECODE_INVAL)

      errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
                                                           src_handshake)
      if errmsg:
        raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
                                   errors.ECODE_INVAL)

      # Load and check source CA
      self.source_x509_ca_pem = self.op.source_x509_ca
      if not self.source_x509_ca_pem:
        raise errors.OpPrereqError("Missing source X509 CA",
                                   errors.ECODE_INVAL)

      try:
        (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
                                                    self._cds)
      except OpenSSL.crypto.Error, err:
        raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
                                   (err, ), errors.ECODE_INVAL)

      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
      if errcode is not None:
        raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
                                   errors.ECODE_INVAL)

      self.source_x509_ca = cert

      src_instance_name = self.op.source_instance_name
      if not src_instance_name:
        raise errors.OpPrereqError("Missing source instance name",
                                   errors.ECODE_INVAL)

      self.source_instance_name = \
        netutils.GetHostname(name=src_instance_name).name

    else:
      raise errors.OpPrereqError("Invalid instance creation mode %r" %
                                 self.op.mode, errors.ECODE_INVAL)

  def ExpandNames(self):
    """ExpandNames for CreateInstance.

    Figure out the right locks for instance creation.

    """
    self.needed_locks = {}

    # this is just a preventive check, but someone might still add this
    # instance in the meantime, and creation will fail at lock-add time
    if self.op.instance_name in\
      [inst.name for inst in self.cfg.GetAllInstancesInfo().values()]:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 self.op.instance_name, errors.ECODE_EXISTS)

    self.add_locks[locking.LEVEL_INSTANCE] = self.op.instance_name

    if self.op.iallocator:
      # TODO: Find a solution to not lock all nodes in the cluster, e.g. by
      # specifying a group on instance creation and then selecting nodes from
      # that group
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
      self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET

      if self.op.opportunistic_locking:
        self.opportunistic_locks[locking.LEVEL_NODE] = True
        self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
    else:
      (self.op.pnode_uuid, self.op.pnode) = \
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
      nodelist = [self.op.pnode_uuid]
      if self.op.snode is not None:
        (self.op.snode_uuid, self.op.snode) = \
          ExpandNodeUuidAndName(self.cfg, self.op.snode_uuid, self.op.snode)
        nodelist.append(self.op.snode_uuid)
      self.needed_locks[locking.LEVEL_NODE] = nodelist

    # in case of import lock the source node too
    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = self.op.src_node
      src_path = self.op.src_path

      if src_path is None:
        self.op.src_path = src_path = self.op.instance_name

      if src_node is None:
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
        self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
        self.op.src_node = None
        if os.path.isabs(src_path):
          raise errors.OpPrereqError("Importing an instance from a path"
                                     " requires a source node option",
                                     errors.ECODE_INVAL)
      else:
        (self.op.src_node_uuid, self.op.src_node) = (_, src_node) = \
          ExpandNodeUuidAndName(self.cfg, self.op.src_node_uuid, src_node)
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
          self.needed_locks[locking.LEVEL_NODE].append(self.op.src_node_uuid)
        if not os.path.isabs(src_path):
          self.op.src_path = src_path = \
            utils.PathJoin(pathutils.EXPORT_DIR, src_path)

    self.needed_locks[locking.LEVEL_NODE_RES] = \
      CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def _RunAllocator(self):
    """Run the allocator based on input opcode.

    """
    if self.op.opportunistic_locking:
      # Only consider nodes for which a lock is held
      node_name_whitelist = self.cfg.GetNodeNames(
        self.owned_locks(locking.LEVEL_NODE))
    else:
      node_name_whitelist = None

    #TODO Export network to iallocator so that it chooses a pnode
    #     in a nodegroup that has the desired network connected to
    req = _CreateInstanceAllocRequest(self.op, self.disks,
                                      self.nics, self.be_full,
                                      node_name_whitelist)
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      # When opportunistic locks are used only a temporary failure is generated
      if self.op.opportunistic_locking:
        ecode = errors.ECODE_TEMP_NORES
      else:
        ecode = errors.ECODE_NORES

      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" %
                                 (self.op.iallocator, ial.info),
                                 ecode)

    (self.op.pnode_uuid, self.op.pnode) = \
      ExpandNodeUuidAndName(self.cfg, None, ial.result[0])
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                 self.op.instance_name, self.op.iallocator,
                 utils.CommaJoin(ial.result))

    assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator"

    if req.RequiredNodes() == 2:
      (self.op.snode_uuid, self.op.snode) = \
        ExpandNodeUuidAndName(self.cfg, None, ial.result[1])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "ADD_MODE": self.op.mode,
      }
    if self.op.mode == constants.INSTANCE_IMPORT:
      env["SRC_NODE"] = self.op.src_node
      env["SRC_PATH"] = self.op.src_path
      env["SRC_IMAGES"] = self.src_images

    env.update(BuildInstanceHookEnv(
      name=self.op.instance_name,
      primary_node_name=self.op.pnode,
      secondary_node_names=self.cfg.GetNodeNames(self.secondaries),
      status=self.op.start,
      os_type=self.op.os_type,
      minmem=self.be_full[constants.BE_MINMEM],
      maxmem=self.be_full[constants.BE_MAXMEM],
      vcpus=self.be_full[constants.BE_VCPUS],
      nics=NICListToTuple(self, self.nics),
      disk_template=self.op.disk_template,
      disks=[(d[constants.IDISK_NAME], d.get("uuid", ""),
              d[constants.IDISK_SIZE], d[constants.IDISK_MODE])
             for d in self.disks],
      bep=self.be_full,
      hvp=self.hv_full,
      hypervisor_name=self.op.hypervisor,
      tags=self.op.tags,
      ))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode(), self.op.pnode_uuid] + self.secondaries
    return nl, nl

  def _ReadExportInfo(self):
    """Reads the export information from disk.

    It will override the opcode source node and path with the actual
    information, if these two were not specified before.

    @return: the export information

    """
    assert self.op.mode == constants.INSTANCE_IMPORT

    if self.op.src_node_uuid is None:
      locked_nodes = self.owned_locks(locking.LEVEL_NODE)
      exp_list = self.rpc.call_export_list(locked_nodes)
      found = False
      for node in exp_list:
        if exp_list[node].fail_msg:
          continue
        if self.op.src_path in exp_list[node].payload:
          found = True
          self.op.src_node = node
          self.op.src_node_uuid = self.cfg.GetNodeInfoByName(node).uuid
          self.op.src_path = utils.PathJoin(pathutils.EXPORT_DIR,
                                            self.op.src_path)
          break
      if not found:
        raise errors.OpPrereqError("No export found for relative path %s" %
                                   self.op.src_path, errors.ECODE_INVAL)

    CheckNodeOnline(self, self.op.src_node_uuid)
    result = self.rpc.call_export_info(self.op.src_node_uuid, self.op.src_path)
    result.Raise("No export or invalid export found in dir %s" %
                 self.op.src_path)

    export_info = objects.SerializableConfigParser.Loads(str(result.payload))
    if not export_info.has_section(constants.INISECT_EXP):
      raise errors.ProgrammerError("Corrupted export config",
                                   errors.ECODE_ENVIRON)

    ei_version = export_info.get(constants.INISECT_EXP, "version")
    if int(ei_version) != constants.EXPORT_VERSION:
      raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                 (ei_version, constants.EXPORT_VERSION),
                                 errors.ECODE_ENVIRON)
    return export_info

  def _ReadExportParams(self, einfo):
    """Use export parameters as defaults.

    In case the opcode doesn't specify (as in override) some instance
    parameters, then try to use them from the export information, if
    that declares them.

    """
    self.op.os_type = einfo.get(constants.INISECT_EXP, "os")

    if not self.op.disks:
      disks = []
      # TODO: import the disk iv_name too
      for idx in range(constants.MAX_DISKS):
        if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
          disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
          disks.append({constants.IDISK_SIZE: disk_sz})
      self.op.disks = disks
      if not disks and self.op.disk_template != constants.DT_DISKLESS:
        raise errors.OpPrereqError("No disk info specified and the export"
                                   " is missing the disk information",
                                   errors.ECODE_INVAL)

    if not self.op.nics:
      nics = []
      for idx in range(constants.MAX_NICS):
        if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
          ndict = {}
          for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
            v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
            ndict[name] = v
          nics.append(ndict)
        else:
          break
      self.op.nics = nics

    if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
      self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()

    if (self.op.hypervisor is None and
        einfo.has_option(constants.INISECT_INS, "hypervisor")):
      self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")

    if einfo.has_section(constants.INISECT_HYP):
      # use the export parameters but do not override the ones
      # specified by the user
      for name, value in einfo.items(constants.INISECT_HYP):
        if name not in self.op.hvparams:
          self.op.hvparams[name] = value

    if einfo.has_section(constants.INISECT_BEP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_BEP):
        if name not in self.op.beparams:
          self.op.beparams[name] = value
        # Compatibility for the old "memory" be param
        if name == constants.BE_MEMORY:
          if constants.BE_MAXMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MAXMEM] = value
          if constants.BE_MINMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MINMEM] = value
    else:
      # try to read the parameters old style, from the main section
      for name in constants.BES_PARAMETERS:
        if (name not in self.op.beparams and
            einfo.has_option(constants.INISECT_INS, name)):
          self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)

    if einfo.has_section(constants.INISECT_OSP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_OSP):
        if name not in self.op.osparams:
          self.op.osparams[name] = value

  def _RevertToDefaults(self, cluster):
    """Revert the instance parameters to the default values.

    """
    # hvparams
    hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
    for name in self.op.hvparams.keys():
      if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
        del self.op.hvparams[name]
    # beparams
    be_defs = cluster.SimpleFillBE({})
    for name in self.op.beparams.keys():
      if name in be_defs and be_defs[name] == self.op.beparams[name]:
        del self.op.beparams[name]
    # nic params
    nic_defs = cluster.SimpleFillNIC({})
    for nic in self.op.nics:
      for name in constants.NICS_PARAMETERS:
        if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
          del nic[name]
    # osparams
    os_defs = cluster.SimpleFillOS(self.op.os_type, {})
    for name in self.op.osparams.keys():
      if name in os_defs and os_defs[name] == self.op.osparams[name]:
        del self.op.osparams[name]

  def _CalculateFileStorageDir(self):
    """Calculate final instance file storage dir.

    """
    # file storage dir calculation/check
    self.instance_file_storage_dir = None
    if self.op.disk_template in constants.DTS_FILEBASED:
      # build the full file storage dir path
      joinargs = []

      if self.op.disk_template == constants.DT_SHARED_FILE:
        get_fsd_fn = self.cfg.GetSharedFileStorageDir
      else:
        get_fsd_fn = self.cfg.GetFileStorageDir

      cfg_storagedir = get_fsd_fn()
      if not cfg_storagedir:
        raise errors.OpPrereqError("Cluster file storage dir not defined",
                                   errors.ECODE_STATE)
      joinargs.append(cfg_storagedir)

      if self.op.file_storage_dir is not None:
        joinargs.append(self.op.file_storage_dir)

      joinargs.append(self.op.instance_name)

      # pylint: disable=W0142
      self.instance_file_storage_dir = utils.PathJoin(*joinargs)

  def CheckPrereq(self): # pylint: disable=R0914
    """Check prerequisites.

    """
    self._CalculateFileStorageDir()

    if self.op.mode == constants.INSTANCE_IMPORT:
      export_info = self._ReadExportInfo()
      self._ReadExportParams(export_info)
      self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
    else:
      self._old_instance_name = None

    if (not self.cfg.GetVGName() and
        self.op.disk_template not in constants.DTS_NOT_LVM):
      raise errors.OpPrereqError("Cluster does not support lvm-based"
                                 " instances", errors.ECODE_STATE)

    if (self.op.hypervisor is None or
        self.op.hypervisor == constants.VALUE_AUTO):
      self.op.hypervisor = self.cfg.GetHypervisorType()

    cluster = self.cfg.GetClusterInfo()
    enabled_hvs = cluster.enabled_hypervisors
    if self.op.hypervisor not in enabled_hvs:
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
                                 " cluster (%s)" %
                                 (self.op.hypervisor, ",".join(enabled_hvs)),
                                 errors.ECODE_STATE)

    # Check tag validity
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)

    # check hypervisor parameter syntax (locally)
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
    filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
                                      self.op.hvparams)
    hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
    hv_type.CheckParameterSyntax(filled_hvp)
    self.hv_full = filled_hvp
    # check that we don't specify global parameters on an instance
    CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
                         "instance", "cluster")

    # fill and remember the beparams dict
    self.be_full = _ComputeFullBeParams(self.op, cluster)

    # build os parameters
    self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)

    # now that hvp/bep are in final format, let's reset to defaults,
    # if told to do so
    if self.op.identify_defaults:
      self._RevertToDefaults(cluster)

    # NIC buildup
    self.nics = _ComputeNics(self.op, cluster, self.check_ip, self.cfg,
                             self.proc.GetECId())

    # disk checks/pre-build
    default_vg = self.cfg.GetVGName()
    self.disks = ComputeDisks(self.op, default_vg)

    if self.op.mode == constants.INSTANCE_IMPORT:
      disk_images = []
      for idx in range(len(self.disks)):
        option = "disk%d_dump" % idx
        if export_info.has_option(constants.INISECT_INS, option):
          # FIXME: are the old os-es, disk sizes, etc. useful?
          export_name = export_info.get(constants.INISECT_INS, option)
          image = utils.PathJoin(self.op.src_path, export_name)
          disk_images.append(image)
        else:
          disk_images.append(False)

      self.src_images = disk_images

      if self.op.instance_name == self._old_instance_name:
        for idx, nic in enumerate(self.nics):
          if nic.mac == constants.VALUE_AUTO:
            nic_mac_ini = "nic%d_mac" % idx
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)

    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT

    # ip ping checks (we use the same ip that was resolved in ExpandNames)
    if self.op.ip_check:
      if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (self.check_ip, self.op.instance_name),
                                   errors.ECODE_NOTUNIQUE)

    #### mac address generation
    # By generating here the mac address both the allocator and the hooks get
    # the real final mac address rather than the 'auto' or 'generate' value.
    # There is a race condition between the generation and the instance object
    # creation, which means that we know the mac is valid now, but we're not
    # sure it will be when we actually add the instance. If things go bad
    # adding the instance will abort because of a duplicate mac, and the
    # creation job will fail.
    for nic in self.nics:
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId())

    #### allocator run

    if self.op.iallocator is not None:
      self._RunAllocator()

    # Release all unneeded node locks
    keep_locks = filter(None, [self.op.pnode_uuid, self.op.snode_uuid,
                               self.op.src_node_uuid])
    ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES)), \
      "Node locks differ from node resource locks"

    #### node related checks

    # check primary node
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode_uuid)
    assert self.pnode is not None, \
      "Cannot retrieve locked node %s" % self.op.pnode_uuid
    if pnode.offline:
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if pnode.drained:
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if not pnode.vm_capable:
      raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
                                 " '%s'" % pnode.name, errors.ECODE_STATE)

    self.secondaries = []

    # Fill in any IPs from IP pools. This must happen here, because we need to
    # know the nic's primary node, as specified by the iallocator
    for idx, nic in enumerate(self.nics):
      net_uuid = nic.network
      if net_uuid is not None:
        nobj = self.cfg.GetNetwork(net_uuid)
        netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.uuid)
        if netparams is None:
          raise errors.OpPrereqError("No netparams found for network"
                                     " %s. Probably not connected to"
                                     " node's %s nodegroup" %
                                     (nobj.name, self.pnode.name),
                                     errors.ECODE_INVAL)
        self.LogInfo("NIC/%d inherits netparams %s" %
                     (idx, netparams.values()))
        nic.nicparams = dict(netparams)
        if nic.ip is not None:
          if nic.ip.lower() == constants.NIC_IP_POOL:
            try:
              nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
                                         " from the address pool" % idx,
                                         errors.ECODE_STATE)
            self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name)
          else:
            try:
              self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("IP address %s already in use"
                                         " or does not belong to network %s" %
                                         (nic.ip, nobj.name),
                                         errors.ECODE_NOTUNIQUE)

      # net is None, ip None or given
      elif self.op.conflicts_check:
        _CheckForConflictingIp(self, nic.ip, self.pnode.uuid)

    # mirror node verification
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.snode_uuid == pnode.uuid:
        raise errors.OpPrereqError("The secondary node cannot be the"
                                   " primary node", errors.ECODE_INVAL)
      CheckNodeOnline(self, self.op.snode_uuid)
      CheckNodeNotDrained(self, self.op.snode_uuid)
      CheckNodeVmCapable(self, self.op.snode_uuid)
      self.secondaries.append(self.op.snode_uuid)

      snode = self.cfg.GetNodeInfo(self.op.snode_uuid)
      if pnode.group != snode.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    nodes = [pnode]
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      nodes.append(snode)
    has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
    excl_stor = compat.any(map(has_es, nodes))
    if excl_stor and not self.op.disk_template in constants.DTS_EXCL_STORAGE:
      raise errors.OpPrereqError("Disk template %s not supported with"
                                 " exclusive storage" % self.op.disk_template,
                                 errors.ECODE_STATE)
    for disk in self.disks:
      CheckSpindlesExclusiveStorage(disk, excl_stor, True)

    node_uuids = [pnode.uuid] + self.secondaries

    if not self.adopt_disks:
      if self.op.disk_template == constants.DT_RBD:
        # _CheckRADOSFreeSpace() is just a placeholder.
        # Any function that checks prerequisites can be placed here.
        # Check if there is enough space on the RADOS cluster.
        CheckRADOSFreeSpace()
      elif self.op.disk_template == constants.DT_EXT:
        # FIXME: Function that checks prereqs if needed
        pass
      elif self.op.disk_template in utils.GetLvmDiskTemplates():
        # Check lv size requirements, if not adopting
        req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
        CheckNodesFreeDiskPerVG(self, node_uuids, req_sizes)
      else:
        # FIXME: add checks for other, non-adopting, non-lvm disk templates
        pass

    elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
      all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
                                disk[constants.IDISK_ADOPT])
                     for disk in self.disks])
      if len(all_lvs) != len(self.disks):
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
                                   errors.ECODE_INVAL)
      for lv_name in all_lvs:
        try:
          # FIXME: lv_name here is "vg/lv" need to ensure that other calls
          # to ReserveLV uses the same syntax
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("LV named %s used by another instance" %
                                     lv_name, errors.ECODE_NOTUNIQUE)

      vg_names = self.rpc.call_vg_list([pnode.uuid])[pnode.uuid]
      vg_names.Raise("Cannot get VG information from node %s" % pnode.name)

      node_lvs = self.rpc.call_lv_list([pnode.uuid],
                                       vg_names.payload.keys())[pnode.uuid]
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
      node_lvs = node_lvs.payload

      delta = all_lvs.difference(node_lvs.keys())
      if delta:
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
      if online_lvs:
        raise errors.OpPrereqError("Online logical volumes found, cannot"
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
                                   errors.ECODE_STATE)
      # update the size of disk based on what is found
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
                                        dsk[constants.IDISK_ADOPT])][0]))

    elif self.op.disk_template == constants.DT_BLOCK:
      # Normalize and de-duplicate device paths
      all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
                       for disk in self.disks])
      if len(all_disks) != len(self.disks):
        raise errors.OpPrereqError("Duplicate disk names given for adoption",
                                   errors.ECODE_INVAL)
      baddisks = [d for d in all_disks
                  if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
      if baddisks:
        raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
                                   " cannot be adopted" %
                                   (utils.CommaJoin(baddisks),
                                    constants.ADOPTABLE_BLOCKDEV_ROOT),
                                   errors.ECODE_INVAL)

      node_disks = self.rpc.call_bdev_sizes([pnode.uuid],
                                            list(all_disks))[pnode.uuid]
      node_disks.Raise("Cannot get block device information from node %s" %
                       pnode.name)
      node_disks = node_disks.payload
      delta = all_disks.difference(node_disks.keys())
      if delta:
        raise errors.OpPrereqError("Missing block device(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_disks[dsk[constants.IDISK_ADOPT]]))

    # Verify instance specs
    spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
    ispec = {
      constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
      constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
      constants.ISPEC_DISK_COUNT: len(self.disks),
      constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE]
                                  for disk in self.disks],
      constants.ISPEC_NIC_COUNT: len(self.nics),
      constants.ISPEC_SPINDLE_USE: spindle_use,
      }

    group_info = self.cfg.GetNodeGroup(pnode.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
    res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
                                               self.op.disk_template)
    if not self.op.ignore_ipolicy and res:
      msg = ("Instance allocation to group %s (%s) violates policy: %s" %
             (pnode.group, group_info.name, utils.CommaJoin(res)))
      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

    CheckHVParams(self, node_uuids, self.op.hypervisor, self.op.hvparams)

    CheckNodeHasOS(self, pnode.uuid, self.op.os_type, self.op.force_variant)
    # check OS parameters (remotely)
    CheckOSParams(self, True, node_uuids, self.op.os_type, self.os_full)

    CheckNicsBridgesExist(self, self.nics, self.pnode.uuid)

    #TODO: _CheckExtParams (remotely)
    # Check parameters for extstorage

    # memory check on primary node
    #TODO(dynmem): use MINMEM for checking
    if self.op.start:
      hvfull = objects.FillDict(cluster.hvparams.get(self.op.hypervisor, {}),
                                self.op.hvparams)
      CheckNodeFreeMemory(self, self.pnode.uuid,
                          "creating instance %s" % self.op.instance_name,
                          self.be_full[constants.BE_MAXMEM],
                          self.op.hypervisor, hvfull)

    self.dry_run_result = list(node_uuids)

  def Exec(self, feedback_fn):
    """Create and add the instance to the cluster.

    """
    assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
                self.owned_locks(locking.LEVEL_NODE)), \
      "Node locks differ from node resource locks"
    assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)

    ht_kind = self.op.hypervisor
    if ht_kind in constants.HTS_REQ_PORT:
      network_port = self.cfg.AllocatePort()
    else:
      network_port = None

    instance_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())

    # This is ugly but we got a chicken-egg problem here
    # We can only take the group disk parameters, as the instance
    # has no disks yet (we are generating them right here).
    nodegroup = self.cfg.GetNodeGroup(self.pnode.group)
    disks = GenerateDiskTemplate(self,
                                 self.op.disk_template,
                                 instance_uuid, self.pnode.uuid,
                                 self.secondaries,
                                 self.disks,
                                 self.instance_file_storage_dir,
                                 self.op.file_driver,
                                 0,
                                 feedback_fn,
                                 self.cfg.GetGroupDiskParams(nodegroup))

    iobj = objects.Instance(name=self.op.instance_name,
                            uuid=instance_uuid,
                            os=self.op.os_type,
                            primary_node=self.pnode.uuid,
                            nics=self.nics, disks=disks,
                            disk_template=self.op.disk_template,
                            disks_active=False,
                            admin_state=constants.ADMINST_DOWN,
                            network_port=network_port,
                            beparams=self.op.beparams,
                            hvparams=self.op.hvparams,
                            hypervisor=self.op.hypervisor,
                            osparams=self.op.osparams,
                            )

    if self.op.tags:
      for tag in self.op.tags:
        iobj.AddTag(tag)

    if self.adopt_disks:
      if self.op.disk_template == constants.DT_PLAIN:
        # rename LVs to the newly-generated names; we need to construct
        # 'fake' LV disks with the old data, plus the new unique_id
        tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
        rename_to = []
        for t_dsk, a_dsk in zip(tmp_disks, self.disks):
          rename_to.append(t_dsk.logical_id)
          t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
          self.cfg.SetDiskID(t_dsk, self.pnode.uuid)
        result = self.rpc.call_blockdev_rename(self.pnode.uuid,
                                               zip(tmp_disks, rename_to))
        result.Raise("Failed to rename adopted LVs")
    else:
      feedback_fn("* creating instance disks...")
      try:
        CreateDisks(self, iobj)
      except errors.OpExecError:
        self.LogWarning("Device creation failed")
        self.cfg.ReleaseDRBDMinors(self.op.instance_name)
        raise

    feedback_fn("adding instance %s to cluster config" % self.op.instance_name)

    self.cfg.AddInstance(iobj, self.proc.GetECId())

    # Declare that we don't want to remove the instance lock anymore, as we've
    # added the instance to the config
    del self.remove_locks[locking.LEVEL_INSTANCE]

    if self.op.mode == constants.INSTANCE_IMPORT:
      # Release unused nodes
      ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node_uuid])
    else:
      # Release all nodes
      ReleaseLocks(self, locking.LEVEL_NODE)

    disk_abort = False
    if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
      feedback_fn("* wiping instance disks...")
      try:
        WipeDisks(self, iobj)
      except errors.OpExecError, err:
        logging.exception("Wiping disks failed")
        self.LogWarning("Wiping instance disks failed (%s)", err)
        disk_abort = True

    if disk_abort:
      # Something is already wrong with the disks, don't do anything else
      pass
    elif self.op.wait_for_sync:
      disk_abort = not WaitForSync(self, iobj)
    elif iobj.disk_template in constants.DTS_INT_MIRROR:
      # make sure the disks are not degraded (still sync-ing is ok)
      feedback_fn("* checking mirrors status")
      disk_abort = not WaitForSync(self, iobj, oneshot=True)
    else:
      disk_abort = False

    if disk_abort:
      RemoveDisks(self, iobj)
      self.cfg.RemoveInstance(iobj.uuid)
      # Make sure the instance lock gets removed
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance")

    # instance disks are now active
    iobj.disks_active = True

    # Release all node resource locks
    ReleaseLocks(self, locking.LEVEL_NODE_RES)

    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
      # we need to set the disks ID to the primary node, since the
      # preceding code might or might not have done it, depending on
      # disk template and other options
      for disk in iobj.disks:
        self.cfg.SetDiskID(disk, self.pnode.uuid)
      if self.op.mode == constants.INSTANCE_CREATE:
        if not self.op.no_install:
          pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
                        not self.op.wait_for_sync)
          if pause_sync:
            feedback_fn("* pausing disk sync to install instance OS")
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
                                                              (iobj.disks,
                                                               iobj), True)
            for idx, success in enumerate(result.payload):
              if not success:
                logging.warn("pause-sync of instance %s for disk %d failed",
                             self.op.instance_name, idx)

          feedback_fn("* running the instance OS create scripts...")
          # FIXME: pass debug option from opcode to backend
          os_add_result = \
            self.rpc.call_instance_os_add(self.pnode.uuid, (iobj, None), False,
                                          self.op.debug_level)
          if pause_sync:
            feedback_fn("* resuming disk sync")
1323
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
1324
                                                              (iobj.disks,
1325
                                                               iobj), False)
1326
            for idx, success in enumerate(result.payload):
1327
              if not success:
1328
                logging.warn("resume-sync of instance %s for disk %d failed",
1329
                             self.op.instance_name, idx)
1330

    
1331
          os_add_result.Raise("Could not add os for instance %s"
1332
                              " on node %s" % (self.op.instance_name,
1333
                                               self.pnode.name))
1334

    
1335
      else:
1336
        if self.op.mode == constants.INSTANCE_IMPORT:
1337
          feedback_fn("* running the instance OS import scripts...")
1338

    
1339
          transfers = []
1340

    
1341
          for idx, image in enumerate(self.src_images):
1342
            if not image:
1343
              continue
1344

    
1345
            # FIXME: pass debug option from opcode to backend
1346
            dt = masterd.instance.DiskTransfer("disk/%s" % idx,
1347
                                               constants.IEIO_FILE, (image, ),
1348
                                               constants.IEIO_SCRIPT,
1349
                                               (iobj.disks[idx], idx),
1350
                                               None)
1351
            transfers.append(dt)
1352

    
1353
          import_result = \
1354
            masterd.instance.TransferInstanceData(self, feedback_fn,
1355
                                                  self.op.src_node_uuid,
1356
                                                  self.pnode.uuid,
1357
                                                  self.pnode.secondary_ip,
1358
                                                  iobj, transfers)
1359
          if not compat.all(import_result):
1360
            self.LogWarning("Some disks for instance %s on node %s were not"
1361
                            " imported successfully" % (self.op.instance_name,
1362
                                                        self.pnode.name))
1363

    
1364
          rename_from = self._old_instance_name
1365

    
1366
        elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
1367
          feedback_fn("* preparing remote import...")
1368
          # The source cluster will stop the instance before attempting to make
1369
          # a connection. In some cases stopping an instance can take a long
1370
          # time, hence the shutdown timeout is added to the connection
1371
          # timeout.
1372
          connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
1373
                             self.op.source_shutdown_timeout)
1374
          timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
1375

    
1376
          assert iobj.primary_node == self.pnode.uuid
1377
          disk_results = \
1378
            masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
1379
                                          self.source_x509_ca,
1380
                                          self._cds, timeouts)
1381
          if not compat.all(disk_results):
1382
            # TODO: Should the instance still be started, even if some disks
1383
            # failed to import (valid for local imports, too)?
1384
            self.LogWarning("Some disks for instance %s on node %s were not"
1385
                            " imported successfully" % (self.op.instance_name,
1386
                                                        self.pnode.name))
1387

    
1388
          rename_from = self.source_instance_name
1389

    
1390
        else:
1391
          # also checked in the prereq part
1392
          raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
1393
                                       % self.op.mode)
1394

    
1395
        # Run rename script on newly imported instance
1396
        assert iobj.name == self.op.instance_name
1397
        feedback_fn("Running rename script for %s" % self.op.instance_name)
1398
        result = self.rpc.call_instance_run_rename(self.pnode.uuid, iobj,
1399
                                                   rename_from,
1400
                                                   self.op.debug_level)
1401
        result.Warn("Failed to run rename script for %s on node %s" %
1402
                    (self.op.instance_name, self.pnode.name), self.LogWarning)
1403

    
1404
    assert not self.owned_locks(locking.LEVEL_NODE_RES)
1405

    
1406
    if self.op.start:
1407
      iobj.admin_state = constants.ADMINST_UP
1408
      self.cfg.Update(iobj, feedback_fn)
1409
      logging.info("Starting instance %s on node %s", self.op.instance_name,
1410
                   self.pnode.name)
1411
      feedback_fn("* starting instance...")
1412
      result = self.rpc.call_instance_start(self.pnode.uuid, (iobj, None, None),
1413
                                            False, self.op.reason)
1414
      result.Raise("Could not start instance")
1415

    
1416
    return list(iobj.all_nodes)
1417

    
1418

    
1419
class LUInstanceRename(LogicalUnit):
1420
  """Rename an instance.
1421

1422
  """
1423
  HPATH = "instance-rename"
1424
  HTYPE = constants.HTYPE_INSTANCE
1425

    
1426
  def CheckArguments(self):
1427
    """Check arguments.
1428

1429
    """
1430
    if self.op.ip_check and not self.op.name_check:
1431
      # TODO: make the ip check more flexible and not depend on the name check
1432
      raise errors.OpPrereqError("IP address check requires a name check",
1433
                                 errors.ECODE_INVAL)
1434

    
1435
  def BuildHooksEnv(self):
1436
    """Build hooks env.
1437

1438
    This runs on master, primary and secondary nodes of the instance.
1439

1440
    """
1441
    env = BuildInstanceHookEnvByObject(self, self.instance)
1442
    env["INSTANCE_NEW_NAME"] = self.op.new_name
1443
    return env
1444

    
1445
  def BuildHooksNodes(self):
1446
    """Build hooks nodes.
1447

1448
    """
1449
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
1450
    return (nl, nl)
1451

    
1452
  def CheckPrereq(self):
1453
    """Check prerequisites.
1454

1455
    This checks that the instance is in the cluster and is not running.
1456

1457
    """
1458
    (self.op.instance_uuid, self.op.instance_name) = \
1459
      ExpandInstanceUuidAndName(self.cfg, self.op.instance_uuid,
1460
                                self.op.instance_name)
1461
    instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1462
    assert instance is not None
1463

    
1464
    # It should actually not happen that an instance is running with a disabled
1465
    # disk template, but in case it does, the renaming of file-based instances
1466
    # will fail horribly. Thus, we test it before.
1467
    if (instance.disk_template in constants.DTS_FILEBASED and
1468
        self.op.new_name != instance.name):
1469
      CheckDiskTemplateEnabled(self.cfg.GetClusterInfo(),
1470
                               instance.disk_template)
1471

    
1472
    CheckNodeOnline(self, instance.primary_node)
1473
    CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
1474
                       msg="cannot rename")
1475
    self.instance = instance
1476

    
1477
    new_name = self.op.new_name
1478
    if self.op.name_check:
1479
      hostname = _CheckHostnameSane(self, new_name)
1480
      new_name = self.op.new_name = hostname.name
1481
      if (self.op.ip_check and
1482
          netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
1483
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
1484
                                   (hostname.ip, new_name),
1485
                                   errors.ECODE_NOTUNIQUE)
1486

    
1487
    instance_names = [inst.name for
1488
                      inst in self.cfg.GetAllInstancesInfo().values()]
1489
    if new_name in instance_names and new_name != instance.name:
1490
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
1491
                                 new_name, errors.ECODE_EXISTS)
1492

    
1493
  def Exec(self, feedback_fn):
1494
    """Rename the instance.
1495

1496
    """
1497
    old_name = self.instance.name
1498

    
1499
    rename_file_storage = False
1500
    if (self.instance.disk_template in constants.DTS_FILEBASED and
1501
        self.op.new_name != self.instance.name):
1502
      old_file_storage_dir = os.path.dirname(
1503
                               self.instance.disks[0].logical_id[1])
1504
      rename_file_storage = True
1505

    
1506
    self.cfg.RenameInstance(self.instance.uuid, self.op.new_name)
1507
    # Change the instance lock. This is definitely safe while we hold the BGL.
1508
    # Otherwise the new lock would have to be added in acquired mode.
1509
    assert self.REQ_BGL
1510
    assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER)
1511
    self.glm.remove(locking.LEVEL_INSTANCE, old_name)
1512
    self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
1513

    
1514
    # re-read the instance from the configuration after rename
1515
    renamed_inst = self.cfg.GetInstanceInfo(self.instance.uuid)
1516

    
1517
    if rename_file_storage:
1518
      new_file_storage_dir = os.path.dirname(
1519
                               renamed_inst.disks[0].logical_id[1])
1520
      result = self.rpc.call_file_storage_dir_rename(renamed_inst.primary_node,
1521
                                                     old_file_storage_dir,
1522
                                                     new_file_storage_dir)
1523
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
1524
                   " (but the instance has been renamed in Ganeti)" %
1525
                   (self.cfg.GetNodeName(renamed_inst.primary_node),
1526
                    old_file_storage_dir, new_file_storage_dir))
1527

    
1528
    StartInstanceDisks(self, renamed_inst, None)
1529
    # update info on disks
1530
    info = GetInstanceInfoText(renamed_inst)
1531
    for (idx, disk) in enumerate(renamed_inst.disks):
1532
      for node_uuid in renamed_inst.all_nodes:
1533
        self.cfg.SetDiskID(disk, node_uuid)
1534
        result = self.rpc.call_blockdev_setinfo(node_uuid, disk, info)
1535
        result.Warn("Error setting info on node %s for disk %s" %
1536
                    (self.cfg.GetNodeName(node_uuid), idx), self.LogWarning)
1537
    try:
1538
      result = self.rpc.call_instance_run_rename(renamed_inst.primary_node,
1539
                                                 renamed_inst, old_name,
1540
                                                 self.op.debug_level)
1541
      result.Warn("Could not run OS rename script for instance %s on node %s"
1542
                  " (but the instance has been renamed in Ganeti)" %
1543
                  (renamed_inst.name,
1544
                   self.cfg.GetNodeName(renamed_inst.primary_node)),
1545
                  self.LogWarning)
1546
    finally:
1547
      ShutdownInstanceDisks(self, renamed_inst)
1548

    
1549
    return renamed_inst.name
1550

    
1551

    
1552
class LUInstanceRemove(LogicalUnit):
1553
  """Remove an instance.
1554

1555
  """
1556
  HPATH = "instance-remove"
1557
  HTYPE = constants.HTYPE_INSTANCE
1558
  REQ_BGL = False
1559

    
1560
  def ExpandNames(self):
1561
    self._ExpandAndLockInstance()
1562
    self.needed_locks[locking.LEVEL_NODE] = []
1563
    self.needed_locks[locking.LEVEL_NODE_RES] = []
1564
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
1565

    
1566
  def DeclareLocks(self, level):
1567
    if level == locking.LEVEL_NODE:
1568
      self._LockInstancesNodes()
1569
    elif level == locking.LEVEL_NODE_RES:
1570
      # Copy node locks
1571
      self.needed_locks[locking.LEVEL_NODE_RES] = \
1572
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1573

    
1574
  def BuildHooksEnv(self):
1575
    """Build hooks env.
1576

1577
    This runs on master, primary and secondary nodes of the instance.
1578

1579
    """
1580
    env = BuildInstanceHookEnvByObject(self, self.instance)
1581
    env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
1582
    return env
1583

    
1584
  def BuildHooksNodes(self):
1585
    """Build hooks nodes.
1586

1587
    """
1588
    nl = [self.cfg.GetMasterNode()]
1589
    nl_post = list(self.instance.all_nodes) + nl
1590
    return (nl, nl_post)
1591

    
1592
  def CheckPrereq(self):
1593
    """Check prerequisites.
1594

1595
    This checks that the instance is in the cluster.
1596

1597
    """
1598
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1599
    assert self.instance is not None, \
1600
      "Cannot retrieve locked instance %s" % self.op.instance_name
1601

    
1602
  def Exec(self, feedback_fn):
1603
    """Remove the instance.
1604

1605
    """
1606
    logging.info("Shutting down instance %s on node %s", self.instance.name,
1607
                 self.cfg.GetNodeName(self.instance.primary_node))
1608

    
1609
    result = self.rpc.call_instance_shutdown(self.instance.primary_node,
1610
                                             self.instance,
1611
                                             self.op.shutdown_timeout,
1612
                                             self.op.reason)
1613
    if self.op.ignore_failures:
1614
      result.Warn("Warning: can't shutdown instance", feedback_fn)
1615
    else:
1616
      result.Raise("Could not shutdown instance %s on node %s" %
1617
                   (self.instance.name,
1618
                    self.cfg.GetNodeName(self.instance.primary_node)))
1619

    
1620
    assert (self.owned_locks(locking.LEVEL_NODE) ==
1621
            self.owned_locks(locking.LEVEL_NODE_RES))
1622
    assert not (set(self.instance.all_nodes) -
1623
                self.owned_locks(locking.LEVEL_NODE)), \
1624
      "Not owning correct locks"
1625

    
1626
    RemoveInstance(self, feedback_fn, self.instance, self.op.ignore_failures)
1627

    
1628

    
1629
class LUInstanceMove(LogicalUnit):
1630
  """Move an instance by data-copying.
1631

1632
  """
1633
  HPATH = "instance-move"
1634
  HTYPE = constants.HTYPE_INSTANCE
1635
  REQ_BGL = False
1636

    
1637
  def ExpandNames(self):
1638
    self._ExpandAndLockInstance()
1639
    (self.op.target_node_uuid, self.op.target_node) = \
1640
      ExpandNodeUuidAndName(self.cfg, self.op.target_node_uuid,
1641
                            self.op.target_node)
1642
    self.needed_locks[locking.LEVEL_NODE] = [self.op.target_node_uuid]
1643
    self.needed_locks[locking.LEVEL_NODE_RES] = []
1644
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
1645

    
1646
  def DeclareLocks(self, level):
1647
    if level == locking.LEVEL_NODE:
1648
      self._LockInstancesNodes(primary_only=True)
1649
    elif level == locking.LEVEL_NODE_RES:
1650
      # Copy node locks
1651
      self.needed_locks[locking.LEVEL_NODE_RES] = \
1652
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1653

    
1654
  def BuildHooksEnv(self):
1655
    """Build hooks env.
1656

1657
    This runs on master, primary and secondary nodes of the instance.
1658

1659
    """
1660
    env = {
1661
      "TARGET_NODE": self.op.target_node,
1662
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
1663
      }
1664
    env.update(BuildInstanceHookEnvByObject(self, self.instance))
1665
    return env
1666

    
1667
  def BuildHooksNodes(self):
1668
    """Build hooks nodes.
1669

1670
    """
1671
    nl = [
1672
      self.cfg.GetMasterNode(),
1673
      self.instance.primary_node,
1674
      self.op.target_node_uuid,
1675
      ]
1676
    return (nl, nl)
1677

    
1678
  def CheckPrereq(self):
1679
    """Check prerequisites.
1680

1681
    This checks that the instance is in the cluster.
1682

1683
    """
1684
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1685
    assert self.instance is not None, \
1686
      "Cannot retrieve locked instance %s" % self.op.instance_name
1687

    
1688
    if self.instance.disk_template not in constants.DTS_COPYABLE:
1689
      raise errors.OpPrereqError("Disk template %s not suitable for copying" %
1690
                                 self.instance.disk_template,
1691
                                 errors.ECODE_STATE)
1692

    
1693
    target_node = self.cfg.GetNodeInfo(self.op.target_node_uuid)
1694
    assert target_node is not None, \
1695
      "Cannot retrieve locked node %s" % self.op.target_node
1696

    
1697
    self.target_node_uuid = target_node.uuid
1698
    if target_node.uuid == self.instance.primary_node:
1699
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
1700
                                 (self.instance.name, target_node.name),
1701
                                 errors.ECODE_STATE)
1702

    
1703
    bep = self.cfg.GetClusterInfo().FillBE(self.instance)
1704

    
1705
    for idx, dsk in enumerate(self.instance.disks):
1706
      if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
1707
        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
1708
                                   " cannot copy" % idx, errors.ECODE_STATE)
1709

    
1710
    CheckNodeOnline(self, target_node.uuid)
1711
    CheckNodeNotDrained(self, target_node.uuid)
1712
    CheckNodeVmCapable(self, target_node.uuid)
1713
    cluster = self.cfg.GetClusterInfo()
1714
    group_info = self.cfg.GetNodeGroup(target_node.group)
1715
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
1716
    CheckTargetNodeIPolicy(self, ipolicy, self.instance, target_node, self.cfg,
1717
                           ignore=self.op.ignore_ipolicy)
1718

    
1719
    if self.instance.admin_state == constants.ADMINST_UP:
1720
      # check memory requirements on the secondary node
1721
      CheckNodeFreeMemory(
1722
          self, target_node.uuid, "failing over instance %s" %
1723
          self.instance.name, bep[constants.BE_MAXMEM],
1724
          self.instance.hypervisor,
1725
          self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])
1726
    else:
1727
      self.LogInfo("Not checking memory on the secondary node as"
1728
                   " instance will not be started")
1729

    
1730
    # check bridge existance
1731
    CheckInstanceBridgesExist(self, self.instance, node_uuid=target_node.uuid)
1732

    
1733
  def Exec(self, feedback_fn):
1734
    """Move an instance.
1735

1736
    The move is done by shutting it down on its present node, copying
1737
    the data over (slow) and starting it on the new node.
1738

1739
    """
1740
    source_node = self.cfg.GetNodeInfo(self.instance.primary_node)
1741
    target_node = self.cfg.GetNodeInfo(self.target_node_uuid)
1742

    
1743
    self.LogInfo("Shutting down instance %s on source node %s",
1744
                 self.instance.name, source_node.name)
1745

    
1746
    assert (self.owned_locks(locking.LEVEL_NODE) ==
1747
            self.owned_locks(locking.LEVEL_NODE_RES))
1748

    
1749
    result = self.rpc.call_instance_shutdown(source_node.uuid, self.instance,
1750
                                             self.op.shutdown_timeout,
1751
                                             self.op.reason)
1752
    if self.op.ignore_consistency:
1753
      result.Warn("Could not shutdown instance %s on node %s. Proceeding"
1754
                  " anyway. Please make sure node %s is down. Error details" %
1755
                  (self.instance.name, source_node.name, source_node.name),
1756
                  self.LogWarning)
1757
    else:
1758
      result.Raise("Could not shutdown instance %s on node %s" %
1759
                   (self.instance.name, source_node.name))
1760

    
1761
    # create the target disks
1762
    try:
1763
      CreateDisks(self, self.instance, target_node_uuid=target_node.uuid)
1764
    except errors.OpExecError:
1765
      self.LogWarning("Device creation failed")
1766
      self.cfg.ReleaseDRBDMinors(self.instance.uuid)
1767
      raise
1768

    
1769
    cluster_name = self.cfg.GetClusterInfo().cluster_name
1770

    
1771
    errs = []
1772
    # activate, get path, copy the data over
1773
    for idx, disk in enumerate(self.instance.disks):
1774
      self.LogInfo("Copying data for disk %d", idx)
1775
      result = self.rpc.call_blockdev_assemble(
1776
                 target_node.uuid, (disk, self.instance), self.instance.name,
1777
                 True, idx)
1778
      if result.fail_msg:
1779
        self.LogWarning("Can't assemble newly created disk %d: %s",
1780
                        idx, result.fail_msg)
1781
        errs.append(result.fail_msg)
1782
        break
1783
      dev_path = result.payload
1784
      result = self.rpc.call_blockdev_export(source_node.uuid, (disk,
1785
                                                                self.instance),
1786
                                             target_node.name, dev_path,
1787
                                             cluster_name)
1788
      if result.fail_msg:
1789
        self.LogWarning("Can't copy data over for disk %d: %s",
1790
                        idx, result.fail_msg)
1791
        errs.append(result.fail_msg)
1792
        break
1793

    
1794
    if errs:
1795
      self.LogWarning("Some disks failed to copy, aborting")
1796
      try:
1797
        RemoveDisks(self, self.instance, target_node_uuid=target_node.uuid)
1798
      finally:
1799
        self.cfg.ReleaseDRBDMinors(self.instance.uuid)
1800
        raise errors.OpExecError("Errors during disk copy: %s" %
1801
                                 (",".join(errs),))
1802

    
1803
    self.instance.primary_node = target_node.uuid
1804
    self.cfg.Update(self.instance, feedback_fn)
1805

    
1806
    self.LogInfo("Removing the disks on the original node")
1807
    RemoveDisks(self, self.instance, target_node_uuid=source_node.uuid)
1808

    
1809
    # Only start the instance if it's marked as up
1810
    if self.instance.admin_state == constants.ADMINST_UP:
1811
      self.LogInfo("Starting instance %s on node %s",
1812
                   self.instance.name, target_node.name)
1813

    
1814
      disks_ok, _ = AssembleInstanceDisks(self, self.instance,
1815
                                          ignore_secondaries=True)
1816
      if not disks_ok:
1817
        ShutdownInstanceDisks(self, self.instance)
1818
        raise errors.OpExecError("Can't activate the instance's disks")
1819

    
1820
      result = self.rpc.call_instance_start(target_node.uuid,
1821
                                            (self.instance, None, None), False,
1822
                                            self.op.reason)
1823
      msg = result.fail_msg
1824
      if msg:
1825
        ShutdownInstanceDisks(self, self.instance)
1826
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
1827
                                 (self.instance.name, target_node.name, msg))
1828

    
1829

    
1830
class LUInstanceMultiAlloc(NoHooksLU):
1831
  """Allocates multiple instances at the same time.
1832

1833
  """
1834
  REQ_BGL = False
1835

    
1836
  def CheckArguments(self):
1837
    """Check arguments.
1838

1839
    """
1840
    nodes = []
1841
    for inst in self.op.instances:
1842
      if inst.iallocator is not None:
1843
        raise errors.OpPrereqError("iallocator are not allowed to be set on"
1844
                                   " instance objects", errors.ECODE_INVAL)
1845
      nodes.append(bool(inst.pnode))
1846
      if inst.disk_template in constants.DTS_INT_MIRROR:
1847
        nodes.append(bool(inst.snode))
1848

    
1849
    has_nodes = compat.any(nodes)
1850
    if compat.all(nodes) ^ has_nodes:
1851
      raise errors.OpPrereqError("There are instance objects providing"
1852
                                 " pnode/snode while others do not",
1853
                                 errors.ECODE_INVAL)
1854

    
1855
    if not has_nodes and self.op.iallocator is None:
1856
      default_iallocator = self.cfg.GetDefaultIAllocator()
1857
      if default_iallocator:
1858
        self.op.iallocator = default_iallocator
1859
      else:
1860
        raise errors.OpPrereqError("No iallocator or nodes on the instances"
1861
                                   " given and no cluster-wide default"
1862
                                   " iallocator found; please specify either"
1863
                                   " an iallocator or nodes on the instances"
1864
                                   " or set a cluster-wide default iallocator",
1865
                                   errors.ECODE_INVAL)
1866

    
1867
    _CheckOpportunisticLocking(self.op)
1868

    
1869
    dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
1870
    if dups:
1871
      raise errors.OpPrereqError("There are duplicate instance names: %s" %
1872
                                 utils.CommaJoin(dups), errors.ECODE_INVAL)
1873

    
1874
  def ExpandNames(self):
1875
    """Calculate the locks.
1876

1877
    """
1878
    self.share_locks = ShareAll()
1879
    self.needed_locks = {
1880
      # iallocator will select nodes and even if no iallocator is used,
1881
      # collisions with LUInstanceCreate should be avoided
1882
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
1883
      }
1884

    
1885
    if self.op.iallocator:
1886
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
1887
      self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET
1888

    
1889
      if self.op.opportunistic_locking:
1890
        self.opportunistic_locks[locking.LEVEL_NODE] = True
1891
        self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
1892
    else:
1893
      nodeslist = []
1894
      for inst in self.op.instances:
1895
        (inst.pnode_uuid, inst.pnode) = \
1896
          ExpandNodeUuidAndName(self.cfg, inst.pnode_uuid, inst.pnode)
1897
        nodeslist.append(inst.pnode)
1898
        if inst.snode is not None:
1899
          (inst.snode_uuid, inst.snode) = \
1900
            ExpandNodeUuidAndName(self.cfg, inst.snode_uuid, inst.snode)
1901
          nodeslist.append(inst.snode)
1902

    
1903
      self.needed_locks[locking.LEVEL_NODE] = nodeslist
1904
      # Lock resources of instance's primary and secondary nodes (copy to
1905
      # prevent accidential modification)
1906
      self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)
1907

    
1908
  def CheckPrereq(self):
1909
    """Check prerequisite.
1910

1911
    """
1912
    if self.op.iallocator:
1913
      cluster = self.cfg.GetClusterInfo()
1914
      default_vg = self.cfg.GetVGName()
1915
      ec_id = self.proc.GetECId()
1916

    
1917
      if self.op.opportunistic_locking:
1918
        # Only consider nodes for which a lock is held
1919
        node_whitelist = self.cfg.GetNodeNames(
1920
                           list(self.owned_locks(locking.LEVEL_NODE)))
1921
      else:
1922
        node_whitelist = None
1923

    
1924
      insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
1925
                                           _ComputeNics(op, cluster, None,
1926
                                                        self.cfg, ec_id),
1927
                                           _ComputeFullBeParams(op, cluster),
1928
                                           node_whitelist)
1929
               for op in self.op.instances]
1930

    
1931
      req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
1932
      ial = iallocator.IAllocator(self.cfg, self.rpc, req)
1933

    
1934
      ial.Run(self.op.iallocator)
1935

    
1936
      if not ial.success:
1937
        raise errors.OpPrereqError("Can't compute nodes using"
1938
                                   " iallocator '%s': %s" %
1939
                                   (self.op.iallocator, ial.info),
1940
                                   errors.ECODE_NORES)
1941

    
1942
      self.ia_result = ial.result
1943

    
1944
    if self.op.dry_run:
1945
      self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
1946
        constants.JOB_IDS_KEY: [],
1947
        })
1948

    
1949
  def _ConstructPartialResult(self):
1950
    """Contructs the partial result.
1951

1952
    """
1953
    if self.op.iallocator:
1954
      (allocatable, failed_insts) = self.ia_result
1955
      allocatable_insts = map(compat.fst, allocatable)
1956
    else:
1957
      allocatable_insts = [op.instance_name for op in self.op.instances]
1958
      failed_insts = []
1959

    
1960
    return {
1961
      opcodes.OpInstanceMultiAlloc.ALLOCATABLE_KEY: allocatable_insts,
1962
      opcodes.OpInstanceMultiAlloc.FAILED_KEY: failed_insts,
1963
      }
1964

    
1965
  def Exec(self, feedback_fn):
1966
    """Executes the opcode.
1967

1968
    """
1969
    jobs = []
1970
    if self.op.iallocator:
1971
      op2inst = dict((op.instance_name, op) for op in self.op.instances)
1972
      (allocatable, failed) = self.ia_result
1973

    
1974
      for (name, node_names) in allocatable:
1975
        op = op2inst.pop(name)
1976

    
1977
        (op.pnode_uuid, op.pnode) = \
1978
          ExpandNodeUuidAndName(self.cfg, None, node_names[0])
1979
        if len(node_names) > 1:
1980
          (op.snode_uuid, op.snode) = \
1981
            ExpandNodeUuidAndName(self.cfg, None, node_names[1])
1982

    
1983
          jobs.append([op])
1984

    
1985
        missing = set(op2inst.keys()) - set(failed)
1986
        assert not missing, \
1987
          "Iallocator did return incomplete result: %s" % \
1988
          utils.CommaJoin(missing)
1989
    else:
1990
      jobs.extend([op] for op in self.op.instances)
1991

    
1992
    return ResultWithJobs(jobs, **self._ConstructPartialResult())
1993

    
1994

    
1995
class _InstNicModPrivate:
1996
  """Data structure for network interface modifications.
1997

1998
  Used by L{LUInstanceSetParams}.
1999

2000
  """
2001
  def __init__(self):
2002
    self.params = None
2003
    self.filled = None
2004

    
2005

    
2006
def _PrepareContainerMods(mods, private_fn):
2007
  """Prepares a list of container modifications by adding a private data field.
2008

2009
  @type mods: list of tuples; (operation, index, parameters)
2010
  @param mods: List of modifications
2011
  @type private_fn: callable or None
2012
  @param private_fn: Callable for constructing a private data field for a
2013
    modification
2014
  @rtype: list
2015

2016
  """
2017
  if private_fn is None:
2018
    fn = lambda: None
2019
  else:
2020
    fn = private_fn
2021

    
2022
  return [(op, idx, params, fn()) for (op, idx, params) in mods]
2023

    
2024

    
2025
def _CheckNodesPhysicalCPUs(lu, node_uuids, requested, hypervisor_specs):
2026
  """Checks if nodes have enough physical CPUs
2027

2028
  This function checks if all given nodes have the needed number of
2029
  physical CPUs. In case any node has less CPUs or we cannot get the
2030
  information from the node, this function raises an OpPrereqError
2031
  exception.
2032

2033
  @type lu: C{LogicalUnit}
2034
  @param lu: a logical unit from which we get configuration data
2035
  @type node_uuids: C{list}
2036
  @param node_uuids: the list of node UUIDs to check
2037
  @type requested: C{int}
2038
  @param requested: the minimum acceptable number of physical CPUs
2039
  @type hypervisor_specs: list of pairs (string, dict of strings)
2040
  @param hypervisor_specs: list of hypervisor specifications in
2041
      pairs (hypervisor_name, hvparams)
2042
  @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
2043
      or we cannot check the node
2044

2045
  """
2046
  nodeinfo = lu.rpc.call_node_info(node_uuids, None, hypervisor_specs)
2047
  for node_uuid in node_uuids:
2048
    info = nodeinfo[node_uuid]
2049
    node_name = lu.cfg.GetNodeName(node_uuid)
2050
    info.Raise("Cannot get current information from node %s" % node_name,
2051
               prereq=True, ecode=errors.ECODE_ENVIRON)
2052
    (_, _, (hv_info, )) = info.payload
2053
    num_cpus = hv_info.get("cpu_total", None)
2054
    if not isinstance(num_cpus, int):
2055
      raise errors.OpPrereqError("Can't compute the number of physical CPUs"
2056
                                 " on node %s, result was '%s'" %
2057
                                 (node_name, num_cpus), errors.ECODE_ENVIRON)
2058
    if requested > num_cpus:
2059
      raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
2060
                                 "required" % (node_name, num_cpus, requested),
2061
                                 errors.ECODE_NORES)
2062

    
2063

    
2064
def GetItemFromContainer(identifier, kind, container):
2065
  """Return the item refered by the identifier.
2066

2067
  @type identifier: string
2068
  @param identifier: Item index or name or UUID
2069
  @type kind: string
2070
  @param kind: One-word item description
2071
  @type container: list
2072
  @param container: Container to get the item from
2073

2074
  """
2075
  # Index
2076
  try:
2077
    idx = int(identifier)
2078
    if idx == -1:
2079
      # Append
2080
      absidx = len(container) - 1
2081
    elif idx < 0:
2082
      raise IndexError("Not accepting negative indices other than -1")
2083
    elif idx > len(container):
2084
      raise IndexError("Got %s index %s, but there are only %s" %
2085
                       (kind, idx, len(container)))
2086
    else:
2087
      absidx = idx
2088
    return (absidx, container[idx])
2089
  except ValueError:
2090
    pass
2091

    
2092
  for idx, item in enumerate(container):
2093
    if item.uuid == identifier or item.name == identifier:
2094
      return (idx, item)
2095

    
2096
  raise errors.OpPrereqError("Cannot find %s with identifier %s" %
2097
                             (kind, identifier), errors.ECODE_NOENT)
2098

    
2099

    
2100
def _ApplyContainerMods(kind, container, chgdesc, mods,
2101
                        create_fn, modify_fn, remove_fn):
2102
  """Applies descriptions in C{mods} to C{container}.
2103

2104
  @type kind: string
2105
  @param kind: One-word item description
2106
  @type container: list
2107
  @param container: Container to modify
2108
  @type chgdesc: None or list
2109
  @param chgdesc: List of applied changes
2110
  @type mods: list
2111
  @param mods: Modifications as returned by L{_PrepareContainerMods}
2112
  @type create_fn: callable
2113
  @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
2114
    receives absolute item index, parameters and private data object as added
2115
    by L{_PrepareContainerMods}, returns tuple containing new item and changes
2116
    as list
2117
  @type modify_fn: callable
2118
  @param modify_fn: Callback for modifying an existing item
2119
    (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
2120
    and private data object as added by L{_PrepareContainerMods}, returns
2121
    changes as list
2122
  @type remove_fn: callable
2123
  @param remove_fn: Callback on removing item; receives absolute item index,
2124
    item and private data object as added by L{_PrepareContainerMods}
2125

2126
  """
2127
  for (op, identifier, params, private) in mods:
2128
    changes = None
2129

    
2130
    if op == constants.DDM_ADD:
2131
      # Calculate where item will be added
2132
      # When adding an item, identifier can only be an index
2133
      try:
2134
        idx = int(identifier)
2135
      except ValueError:
2136
        raise errors.OpPrereqError("Only possitive integer or -1 is accepted as"
2137
                                   " identifier for %s" % constants.DDM_ADD,
2138
                                   errors.ECODE_INVAL)
2139
      if idx == -1:
2140
        addidx = len(container)
2141
      else:
2142
        if idx < 0:
2143
          raise IndexError("Not accepting negative indices other than -1")
2144
        elif idx > len(container):
2145
          raise IndexError("Got %s index %s, but there are only %s" %
2146
                           (kind, idx, len(container)))
2147
        addidx = idx
2148

    
2149
      if create_fn is None:
2150
        item = params
2151
      else:
2152
        (item, changes) = create_fn(addidx, params, private)
2153

    
2154
      if idx == -1:
2155
        container.append(item)
2156
      else:
2157
        assert idx >= 0
2158
        assert idx <= len(container)
2159
        # list.insert does so before the specified index
2160
        container.insert(idx, item)
2161
    else:
2162
      # Retrieve existing item
2163
      (absidx, item) = GetItemFromContainer(identifier, kind, container)
2164

    
2165
      if op == constants.DDM_REMOVE:
2166
        assert not params
2167

    
2168
        if remove_fn is not None:
2169
          remove_fn(absidx, item, private)
2170

    
2171
        changes = [("%s/%s" % (kind, absidx), "remove")]
2172

    
2173
        assert container[absidx] == item
2174
        del container[absidx]
2175
      elif op == constants.DDM_MODIFY:
2176
        if modify_fn is not None:
2177
          changes = modify_fn(absidx, item, params, private)
2178
      else:
2179
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2180

    
2181
    assert _TApplyContModsCbChanges(changes)
2182

    
2183
    if not (chgdesc is None or changes is None):
2184
      chgdesc.extend(changes)
2185

    
2186

    
2187
def _UpdateIvNames(base_index, disks):
2188
  """Updates the C{iv_name} attribute of disks.
2189

2190
  @type disks: list of L{objects.Disk}
2191

2192
  """
2193
  for (idx, disk) in enumerate(disks):
2194
    disk.iv_name = "disk/%s" % (base_index + idx, )
2195

    
2196

    
2197
class LUInstanceSetParams(LogicalUnit):
2198
  """Modifies an instances's parameters.
2199

2200
  """
2201
  HPATH = "instance-modify"
2202
  HTYPE = constants.HTYPE_INSTANCE
2203
  REQ_BGL = False
2204

    
2205
  @staticmethod
2206
  def _UpgradeDiskNicMods(kind, mods, verify_fn):
2207
    assert ht.TList(mods)
2208
    assert not mods or len(mods[0]) in (2, 3)
2209

    
2210
    if mods and len(mods[0]) == 2:
2211
      result = []
2212

    
2213
      addremove = 0
2214
      for op, params in mods:
2215
        if op in (constants.DDM_ADD, constants.DDM_REMOVE):
2216
          result.append((op, -1, params))
2217
          addremove += 1
2218

    
2219
          if addremove > 1:
2220
            raise errors.OpPrereqError("Only one %s add or remove operation is"
2221
                                       " supported at a time" % kind,
2222
                                       errors.ECODE_INVAL)
2223
        else:
2224
          result.append((constants.DDM_MODIFY, op, params))
2225

    
2226
      assert verify_fn(result)
2227
    else:
2228
      result = mods
2229

    
2230
    return result
2231

    
2232
  @staticmethod
2233
  def _CheckMods(kind, mods, key_types, item_fn):
2234
    """Ensures requested disk/NIC modifications are valid.
2235

2236
    """
2237
    for (op, _, params) in mods:
2238
      assert ht.TDict(params)
2239

    
2240
      # If 'key_types' is an empty dict, we assume we have an
2241
      # 'ext' template and thus do not ForceDictType
2242
      if key_types:
2243
        utils.ForceDictType(params, key_types)
2244

    
2245
      if op == constants.DDM_REMOVE:
2246
        if params:
2247
          raise errors.OpPrereqError("No settings should be passed when"
2248
                                     " removing a %s" % kind,
2249
                                     errors.ECODE_INVAL)
2250
      elif op in (constants.DDM_ADD, constants.DDM_MODIFY):
2251
        item_fn(op, params)
2252
      else:
2253
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2254

    
2255
  @staticmethod
2256
  def _VerifyDiskModification(op, params, excl_stor):
2257
    """Verifies a disk modification.
2258

2259
    """
2260
    if op == constants.DDM_ADD:
2261
      mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
2262
      if mode not in constants.DISK_ACCESS_SET:
2263
        raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
2264
                                   errors.ECODE_INVAL)
2265

    
2266
      size = params.get(constants.IDISK_SIZE, None)
2267
      if size is None:
2268
        raise errors.OpPrereqError("Required disk parameter '%s' missing" %
2269
                                   constants.IDISK_SIZE, errors.ECODE_INVAL)
2270

    
2271
      try:
2272
        size = int(size)
2273
      except (TypeError, ValueError), err:
2274
        raise errors.OpPrereqError("Invalid disk size parameter: %s" % err,
2275
                                   errors.ECODE_INVAL)
2276

    
2277
      params[constants.IDISK_SIZE] = size
2278
      name = params.get(constants.IDISK_NAME, None)
2279
      if name is not None and name.lower() == constants.VALUE_NONE:
2280
        params[constants.IDISK_NAME] = None
2281

    
2282
      CheckSpindlesExclusiveStorage(params, excl_stor, True)
2283

    
2284
    elif op == constants.DDM_MODIFY:
2285
      if constants.IDISK_SIZE in params:
2286
        raise errors.OpPrereqError("Disk size change not possible, use"
2287
                                   " grow-disk", errors.ECODE_INVAL)
2288
      if len(params) > 2:
2289
        raise errors.OpPrereqError("Disk modification doesn't support"
2290
                                   " additional arbitrary parameters",
2291
                                   errors.ECODE_INVAL)
2292
      name = params.get(constants.IDISK_NAME, None)
2293
      if name is not None and name.lower() == constants.VALUE_NONE:
2294
        params[constants.IDISK_NAME] = None
2295

    
2296
  @staticmethod
2297
  def _VerifyNicModification(op, params):
2298
    """Verifies a network interface modification.
2299

2300
    """
2301
    if op in (constants.DDM_ADD, constants.DDM_MODIFY):
2302
      ip = params.get(constants.INIC_IP, None)
2303
      name = params.get(constants.INIC_NAME, None)
2304
      req_net = params.get(constants.INIC_NETWORK, None)
2305
      link = params.get(constants.NIC_LINK, None)
2306
      mode = params.get(constants.NIC_MODE, None)
2307
      if name is not None and name.lower() == constants.VALUE_NONE:
2308
        params[constants.INIC_NAME] = None
2309
      if req_net is not None:
2310
        if req_net.lower() == constants.VALUE_NONE:
2311
          params[constants.INIC_NETWORK] = None
2312
          req_net = None
2313
        elif link is not None or mode is not None:
2314
          raise errors.OpPrereqError("If network is given"
2315
                                     " mode or link should not",
2316
                                     errors.ECODE_INVAL)
2317

    
2318
      if op == constants.DDM_ADD:
2319
        macaddr = params.get(constants.INIC_MAC, None)
2320
        if macaddr is None:
2321
          params[constants.INIC_MAC] = constants.VALUE_AUTO
2322

    
2323
      if ip is not None:
2324
        if ip.lower() == constants.VALUE_NONE:
2325
          params[constants.INIC_IP] = None
2326
        else:
2327
          if ip.lower() == constants.NIC_IP_POOL:
2328
            if op == constants.DDM_ADD and req_net is None:
2329
              raise errors.OpPrereqError("If ip=pool, parameter network"
2330
                                         " cannot be none",
2331
                                         errors.ECODE_INVAL)
2332
          else:
2333
            if not netutils.IPAddress.IsValid(ip):
2334
              raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
2335
                                         errors.ECODE_INVAL)
2336

    
2337
      if constants.INIC_MAC in params:
2338
        macaddr = params[constants.INIC_MAC]
2339
        if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2340
          macaddr = utils.NormalizeAndValidateMac(macaddr)
2341

    
2342
        if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
2343
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
2344
                                     " modifying an existing NIC",
2345
                                     errors.ECODE_INVAL)
2346

    
2347
  def CheckArguments(self):
2348
    if not (self.op.nics or self.op.disks or self.op.disk_template or
2349
            self.op.hvparams or self.op.beparams or self.op.os_name or
2350
            self.op.offline is not None or self.op.runtime_mem or
2351
            self.op.pnode):
2352
      raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
2353

    
2354
    if self.op.hvparams:
2355
      CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
2356
                           "hypervisor", "instance", "cluster")
2357

    
2358
    self.op.disks = self._UpgradeDiskNicMods(
2359
      "disk", self.op.disks, opcodes.OpInstanceSetParams.TestDiskModifications)
2360
    self.op.nics = self._UpgradeDiskNicMods(
2361
      "NIC", self.op.nics, opcodes.OpInstanceSetParams.TestNicModifications)
2362

    
2363
    if self.op.disks and self.op.disk_template is not None:
2364
      raise errors.OpPrereqError("Disk template conversion and other disk"
2365
                                 " changes not supported at the same time",
2366
                                 errors.ECODE_INVAL)
2367

    
2368
    if (self.op.disk_template and
2369
        self.op.disk_template in constants.DTS_INT_MIRROR and
2370
        self.op.remote_node is None):
2371
      raise errors.OpPrereqError("Changing the disk template to a mirrored"
2372
                                 " one requires specifying a secondary node",
2373
                                 errors.ECODE_INVAL)
2374

    
2375
    # Check NIC modifications
2376
    self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
2377
                    self._VerifyNicModification)
2378

    
2379
    if self.op.pnode:
2380
      (self.op.pnode_uuid, self.op.pnode) = \
2381
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
2382

    
2383
  def ExpandNames(self):
2384
    self._ExpandAndLockInstance()
2385
    self.needed_locks[locking.LEVEL_NODEGROUP] = []
2386
    # Can't even acquire node locks in shared mode as upcoming changes in
2387
    # Ganeti 2.6 will start to modify the node object on disk conversion
2388
    self.needed_locks[locking.LEVEL_NODE] = []
2389
    self.needed_locks[locking.LEVEL_NODE_RES] = []
2390
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2391
    # Look node group to look up the ipolicy
2392
    self.share_locks[locking.LEVEL_NODEGROUP] = 1
2393

    
2394
  def DeclareLocks(self, level):
2395
    if level == locking.LEVEL_NODEGROUP:
2396
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
2397
      # Acquire locks for the instance's nodegroups optimistically. Needs
2398
      # to be verified in CheckPrereq
2399
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
2400
        self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
2401
    elif level == locking.LEVEL_NODE:
2402
      self._LockInstancesNodes()
2403
      if self.op.disk_template and self.op.remote_node:
2404
        (self.op.remote_node_uuid, self.op.remote_node) = \
2405
          ExpandNodeUuidAndName(self.cfg, self.op.remote_node_uuid,
2406
                                self.op.remote_node)
2407
        self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node_uuid)
2408
    elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
2409
      # Copy node locks
2410
      self.needed_locks[locking.LEVEL_NODE_RES] = \
2411
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
2412

    
2413
  def BuildHooksEnv(self):
2414
    """Build hooks env.
2415

2416
    This runs on the master, primary and secondaries.
2417

2418
    """
2419
    args = {}
2420
    if constants.BE_MINMEM in self.be_new:
2421
      args["minmem"] = self.be_new[constants.BE_MINMEM]
2422
    if constants.BE_MAXMEM in self.be_new:
2423
      args["maxmem"] = self.be_new[constants.BE_MAXMEM]
2424
    if constants.BE_VCPUS in self.be_new:
2425
      args["vcpus"] = self.be_new[constants.BE_VCPUS]
2426
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
2427
    # information at all.
2428

    
2429
    if self._new_nics is not None:
2430
      nics = []
2431

    
2432
      for nic in self._new_nics:
2433
        n = copy.deepcopy(nic)
2434
        nicparams = self.cluster.SimpleFillNIC(n.nicparams)
2435
        n.nicparams = nicparams
2436
        nics.append(NICToTuple(self, n))
2437

    
2438
      args["nics"] = nics
2439

    
2440
    env = BuildInstanceHookEnvByObject(self, self.instance, override=args)
2441
    if self.op.disk_template:
2442
      env["NEW_DISK_TEMPLATE"] = self.op.disk_template
2443
    if self.op.runtime_mem:
2444
      env["RUNTIME_MEMORY"] = self.op.runtime_mem
2445

    
2446
    return env
2447

    
2448
  def BuildHooksNodes(self):
2449
    """Build hooks nodes.
2450

2451
    """
2452
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
2453
    return (nl, nl)
2454

    
2455
  def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
2456
                              old_params, cluster, pnode_uuid):
2457

    
2458
    update_params_dict = dict([(key, params[key])
2459
                               for key in constants.NICS_PARAMETERS
2460
                               if key in params])
2461

    
2462
    req_link = update_params_dict.get(constants.NIC_LINK, None)
2463
    req_mode = update_params_dict.get(constants.NIC_MODE, None)
2464

    
2465
    new_net_uuid = None
2466
    new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid)
2467
    if new_net_uuid_or_name:
2468
      new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name)
2469
      new_net_obj = self.cfg.GetNetwork(new_net_uuid)
2470

    
2471
    if old_net_uuid:
2472
      old_net_obj = self.cfg.GetNetwork(old_net_uuid)
2473

    
2474
    if new_net_uuid:
2475
      netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode_uuid)
2476
      if not netparams:
2477
        raise errors.OpPrereqError("No netparams found for the network"
2478
                                   " %s, probably not connected" %
2479
                                   new_net_obj.name, errors.ECODE_INVAL)
2480
      new_params = dict(netparams)
2481
    else:
2482
      new_params = GetUpdatedParams(old_params, update_params_dict)
2483

    
2484
    utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)
2485

    
2486
    new_filled_params = cluster.SimpleFillNIC(new_params)
2487
    objects.NIC.CheckParameterSyntax(new_filled_params)
2488

    
2489
    new_mode = new_filled_params[constants.NIC_MODE]
2490
    if new_mode == constants.NIC_MODE_BRIDGED:
2491
      bridge = new_filled_params[constants.NIC_LINK]
2492
      msg = self.rpc.call_bridges_exist(pnode_uuid, [bridge]).fail_msg
2493
      if msg:
2494
        msg = "Error checking bridges on node '%s': %s" % \
2495
                (self.cfg.GetNodeName(pnode_uuid), msg)
2496
        if self.op.force:
2497
          self.warn.append(msg)
2498
        else:
2499
          raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
2500

    
2501
    elif new_mode == constants.NIC_MODE_ROUTED:
2502
      ip = params.get(constants.INIC_IP, old_ip)
2503
      if ip is None:
2504
        raise errors.OpPrereqError("Cannot set the NIC IP address to None"
2505
                                   " on a routed NIC", errors.ECODE_INVAL)
2506

    
2507
    elif new_mode == constants.NIC_MODE_OVS:
2508
      # TODO: check OVS link
2509
      self.LogInfo("OVS links are currently not checked for correctness")
2510

    
2511
    if constants.INIC_MAC in params:
2512
      mac = params[constants.INIC_MAC]
2513
      if mac is None:
2514
        raise errors.OpPrereqError("Cannot unset the NIC MAC address",
2515
                                   errors.ECODE_INVAL)
2516
      elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2517
        # otherwise generate the MAC address
2518
        params[constants.INIC_MAC] = \
2519
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
2520
      else:
2521
        # or validate/reserve the current one
2522
        try:
2523
          self.cfg.ReserveMAC(mac, self.proc.GetECId())
2524
        except errors.ReservationError:
2525
          raise errors.OpPrereqError("MAC address '%s' already in use"
2526
                                     " in cluster" % mac,
2527
                                     errors.ECODE_NOTUNIQUE)
2528
    elif new_net_uuid != old_net_uuid:
2529

    
2530
      def get_net_prefix(net_uuid):
        mac_prefix = None
        if net_uuid:
          nobj = self.cfg.GetNetwork(net_uuid)
          mac_prefix = nobj.mac_prefix

        return mac_prefix

      new_prefix = get_net_prefix(new_net_uuid)
      old_prefix = get_net_prefix(old_net_uuid)
      if old_prefix != new_prefix:
        params[constants.INIC_MAC] = \
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())

    # if there is a change in (ip, network) tuple
    new_ip = params.get(constants.INIC_IP, old_ip)
    if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
      if new_ip:
        # if IP is pool then require a network and generate one IP
        if new_ip.lower() == constants.NIC_IP_POOL:
          if new_net_uuid:
            try:
              new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("Unable to get a free IP"
                                         " from the address pool",
                                         errors.ECODE_STATE)
            self.LogInfo("Chose IP %s from network %s",
                         new_ip,
                         new_net_obj.name)
            params[constants.INIC_IP] = new_ip
          else:
            raise errors.OpPrereqError("ip=pool, but no network found",
                                       errors.ECODE_INVAL)
        # Otherwise, reserve the new IP in the new network, if any
        elif new_net_uuid:
          try:
            self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId())
            self.LogInfo("Reserving IP %s in network %s",
                         new_ip, new_net_obj.name)
          except errors.ReservationError:
            raise errors.OpPrereqError("IP %s not available in network %s" %
                                       (new_ip, new_net_obj.name),
                                       errors.ECODE_NOTUNIQUE)
        # new network is None so check if new IP is a conflicting IP
        elif self.op.conflicts_check:
          _CheckForConflictingIp(self, new_ip, pnode_uuid)

      # release old IP if old network is not None
      if old_ip and old_net_uuid:
        try:
          self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId())
        except errors.AddressPoolError:
          logging.warning("Release IP %s not contained in network %s",
                          old_ip, old_net_obj.name)

    # there are no changes in (ip, network) tuple and old network is not None
    elif (old_net_uuid is not None and
          (req_link is not None or req_mode is not None)):
      raise errors.OpPrereqError("Not allowed to change link or mode of"
                                 " a NIC that is connected to a network",
                                 errors.ECODE_INVAL)

    private.params = new_params
    private.filled = new_filled_params

  def _PreCheckDiskTemplate(self, pnode_info):
    """CheckPrereq checks related to a new disk template."""
    # Arguments are passed to avoid configuration lookups
    pnode_uuid = self.instance.primary_node
    if self.instance.disk_template == self.op.disk_template:
      raise errors.OpPrereqError("Instance already has disk template %s" %
                                 self.instance.disk_template,
                                 errors.ECODE_INVAL)

    if not self.cluster.IsDiskTemplateEnabled(self.instance.disk_template):
      raise errors.OpPrereqError("Disk template '%s' is not enabled for this"
                                 " cluster." % self.instance.disk_template)

    if (self.instance.disk_template,
        self.op.disk_template) not in self._DISK_CONVERSIONS:
      raise errors.OpPrereqError("Unsupported disk template conversion from"
                                 " %s to %s" % (self.instance.disk_template,
                                                self.op.disk_template),
                                 errors.ECODE_INVAL)
    CheckInstanceState(self, self.instance, INSTANCE_DOWN,
                       msg="cannot change disk template")
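    # Converting to a mirrored template needs a usable secondary node: it must
    # be online, not drained, have enough free space in the right volume
    # groups and satisfy the target group's instance policy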
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.remote_node_uuid == pnode_uuid:
        raise errors.OpPrereqError("Given new secondary node %s is the same"
                                   " as the primary node of the instance" %
                                   self.op.remote_node, errors.ECODE_STATE)
      CheckNodeOnline(self, self.op.remote_node_uuid)
      CheckNodeNotDrained(self, self.op.remote_node_uuid)
      # FIXME: here we assume that the old instance type is DT_PLAIN
      assert self.instance.disk_template == constants.DT_PLAIN
      disks = [{constants.IDISK_SIZE: d.size,
                constants.IDISK_VG: d.logical_id[0]}
               for d in self.instance.disks]
      required = ComputeDiskSizePerVG(self.op.disk_template, disks)
      CheckNodesFreeDiskPerVG(self, [self.op.remote_node_uuid], required)

      snode_info = self.cfg.GetNodeInfo(self.op.remote_node_uuid)
      snode_group = self.cfg.GetNodeGroup(snode_info.group)
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
                                                              snode_group)
      CheckTargetNodeIPolicy(self, ipolicy, self.instance, snode_info, self.cfg,
                             ignore=self.op.ignore_ipolicy)
      if pnode_info.group != snode_info.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    if not self.op.disk_template in constants.DTS_EXCL_STORAGE:
      # Make sure none of the nodes require exclusive storage
      nodes = [pnode_info]
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        assert snode_info
        nodes.append(snode_info)
      has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
      if compat.any(map(has_es, nodes)):
        errmsg = ("Cannot convert disk template from %s to %s when exclusive"
                  " storage is enabled" % (self.instance.disk_template,
                                           self.op.disk_template))
        raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)

  def _PreCheckDisks(self, ispec):
    """CheckPrereq checks related to disk changes.

    @type ispec: dict
    @param ispec: instance specs to be updated with the new disks

    """
    self.diskparams = self.cfg.GetInstanceDiskParams(self.instance)

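    # Whether any of the instance's nodes uses exclusive storage determines
    # how the individual disk modifications below are verified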
    excl_stor = compat.any(
      rpc.GetExclusiveStorageForNodes(self.cfg,
                                      self.instance.all_nodes).values()
      )

    # Check disk modifications. This is done here and not in CheckArguments
    # (as with NICs), because we need to know the instance's disk template
    ver_fn = lambda op, par: self._VerifyDiskModification(op, par, excl_stor)
    if self.instance.disk_template == constants.DT_EXT:
      self._CheckMods("disk", self.op.disks, {}, ver_fn)
    else:
      self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
                      ver_fn)

    self.diskmod = _PrepareContainerMods(self.op.disks, None)

    # Check the validity of the `provider' parameter
    if self.instance.disk_template in constants.DT_EXT:
      for mod in self.diskmod:
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
        if mod[0] == constants.DDM_ADD:
          if ext_provider is None:
            raise errors.OpPrereqError("Instance template is '%s' and parameter"
                                       " '%s' missing, during disk add" %
                                       (constants.DT_EXT,
                                        constants.IDISK_PROVIDER),
                                       errors.ECODE_NOENT)
        elif mod[0] == constants.DDM_MODIFY:
          if ext_provider:
            raise errors.OpPrereqError("Parameter '%s' is invalid during disk"
                                       " modification" %
                                       constants.IDISK_PROVIDER,
                                       errors.ECODE_INVAL)
    else:
      for mod in self.diskmod:
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
        if ext_provider is not None:
          raise errors.OpPrereqError("Parameter '%s' is only valid for"
                                     " instances of type '%s'" %
                                     (constants.IDISK_PROVIDER,
                                      constants.DT_EXT),
                                     errors.ECODE_INVAL)

    if self.op.disks and self.instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Disk operations not supported for"
                                 " diskless instances", errors.ECODE_INVAL)

    def _PrepareDiskMod(_, disk, params, __):
      disk.name = params.get(constants.IDISK_NAME, None)

    # Verify disk changes (operating on a copy)
    disks = copy.deepcopy(self.instance.disks)
    _ApplyContainerMods("disk", disks, None, self.diskmod, None,
                        _PrepareDiskMod, None)
    utils.ValidateDeviceNames("disk", disks)
    if len(disks) > constants.MAX_DISKS:
      raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
                                 " more" % constants.MAX_DISKS,
                                 errors.ECODE_STATE)
    disk_sizes = [disk.size for disk in self.instance.disks]
    disk_sizes.extend(params["size"] for (op, idx, params, private) in
                      self.diskmod if op == constants.DDM_ADD)
    ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
    ispec[constants.ISPEC_DISK_SIZE] = disk_sizes

    if self.op.offline is not None and self.op.offline:
      CheckInstanceState(self, self.instance, CAN_CHANGE_INSTANCE_OFFLINE,
                         msg="can't change to offline")

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the instance list against the existing names.

    """
    assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    self.cluster = self.cfg.GetClusterInfo()

    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    pnode_uuid = self.instance.primary_node

    self.warn = []

    if (self.op.pnode_uuid is not None and self.op.pnode_uuid != pnode_uuid and
        not self.op.force):
      # verify that the instance is not up
      instance_info = self.rpc.call_instance_info(
          pnode_uuid, self.instance.name, self.instance.hypervisor,
          self.instance.hvparams)
      if instance_info.fail_msg:
        self.warn.append("Can't get instance runtime information: %s" %
                         instance_info.fail_msg)
      elif instance_info.payload:
        raise errors.OpPrereqError("Instance is still running on %s" %
                                   self.cfg.GetNodeName(pnode_uuid),
                                   errors.ECODE_STATE)

    assert pnode_uuid in self.owned_locks(locking.LEVEL_NODE)
    node_uuids = list(self.instance.all_nodes)
    pnode_info = self.cfg.GetNodeInfo(pnode_uuid)

    #_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
    assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
    group_info = self.cfg.GetNodeGroup(pnode_info.group)

    # dictionary with instance information after the modification
    ispec = {}

    # Prepare NIC modifications
    self.nicmod = _PrepareContainerMods(self.op.nics, _InstNicModPrivate)

    # OS change
    if self.op.os_name and not self.op.force:
      CheckNodeHasOS(self, self.instance.primary_node, self.op.os_name,
                     self.op.force_variant)
      instance_os = self.op.os_name
    else:
      instance_os = self.instance.os

    assert not (self.op.disk_template and self.op.disks), \
      "Can't modify disk template and apply disk changes at the same time"

    if self.op.disk_template:
      self._PreCheckDiskTemplate(pnode_info)

    self._PreCheckDisks(ispec)

    # hvparams processing
    if self.op.hvparams:
      hv_type = self.instance.hypervisor
      i_hvdict = GetUpdatedParams(self.instance.hvparams, self.op.hvparams)
      utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
      hv_new = self.cluster.SimpleFillHV(hv_type, self.instance.os, i_hvdict)

      # local check
      hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
      CheckHVParams(self, node_uuids, self.instance.hypervisor, hv_new)
      self.hv_proposed = self.hv_new = hv_new # the new actual values
      self.hv_inst = i_hvdict # the new dict (without defaults)
    else:
      self.hv_proposed = self.cluster.SimpleFillHV(self.instance.hypervisor,
                                                   self.instance.os,
                                                   self.instance.hvparams)
      self.hv_new = self.hv_inst = {}

    # beparams processing
    if self.op.beparams:
      i_bedict = GetUpdatedParams(self.instance.beparams, self.op.beparams,
                                  use_none=True)
      objects.UpgradeBeParams(i_bedict)
      utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
      be_new = self.cluster.SimpleFillBE(i_bedict)
      self.be_proposed = self.be_new = be_new # the new actual values
      self.be_inst = i_bedict # the new dict (without defaults)
    else:
      self.be_new = self.be_inst = {}
      self.be_proposed = self.cluster.SimpleFillBE(self.instance.beparams)
    be_old = self.cluster.FillBE(self.instance)

    # CPU param validation -- checking every time a parameter is
    # changed to cover all cases where either CPU mask or vcpus have
    # changed
    if (constants.BE_VCPUS in self.be_proposed and
        constants.HV_CPU_MASK in self.hv_proposed):
      cpu_list = \
        utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
      # Verify mask is consistent with number of vCPUs. Can skip this
      # test if only 1 entry in the CPU mask, which means same mask
      # is applied to all vCPUs.
      if (len(cpu_list) > 1 and
          len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
        raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
                                   " CPU mask [%s]" %
                                   (self.be_proposed[constants.BE_VCPUS],
                                    self.hv_proposed[constants.HV_CPU_MASK]),
                                   errors.ECODE_INVAL)

      # Only perform this test if a new CPU mask is given
      if constants.HV_CPU_MASK in self.hv_new:
        # Calculate the largest CPU number requested
        max_requested_cpu = max(map(max, cpu_list))
        # Check that all of the instance's nodes have enough physical CPUs to
        # satisfy the requested CPU mask
        hvspecs = [(self.instance.hypervisor,
                    self.cfg.GetClusterInfo()
                      .hvparams[self.instance.hypervisor])]
        _CheckNodesPhysicalCPUs(self, self.instance.all_nodes,
                                max_requested_cpu + 1,
                                hvspecs)

    # osparams processing
    if self.op.osparams:
      i_osdict = GetUpdatedParams(self.instance.osparams, self.op.osparams)
      CheckOSParams(self, True, node_uuids, instance_os, i_osdict)
      self.os_inst = i_osdict # the new dict (without defaults)
    else:
      self.os_inst = {}

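    # When the maximum memory is increased without --force, verify that the
    # primary node still has enough free memory to start the instance; for
    # auto-balanced instances the secondary nodes must be able to host it too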
    #TODO(dynmem): do the appropriate check involving MINMEM
    if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
        be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
      mem_check_list = [pnode_uuid]
      if be_new[constants.BE_AUTO_BALANCE]:
        # either we changed auto_balance to yes or it was from before
        mem_check_list.extend(self.instance.secondary_nodes)
      instance_info = self.rpc.call_instance_info(
          pnode_uuid, self.instance.name, self.instance.hypervisor,
          self.instance.hvparams)
      hvspecs = [(self.instance.hypervisor,
                  self.cluster.hvparams[self.instance.hypervisor])]
      nodeinfo = self.rpc.call_node_info(mem_check_list, None,
                                         hvspecs)
      pninfo = nodeinfo[pnode_uuid]
      msg = pninfo.fail_msg
      if msg:
        # Assume the primary node is unreachable and go ahead
        self.warn.append("Can't get info from primary node %s: %s" %
                         (self.cfg.GetNodeName(pnode_uuid), msg))
      else:
        (_, _, (pnhvinfo, )) = pninfo.payload
        if not isinstance(pnhvinfo.get("memory_free", None), int):
          self.warn.append("Node data from primary node %s doesn't contain"
                           " free memory information" %
                           self.cfg.GetNodeName(pnode_uuid))
        elif instance_info.fail_msg:
          self.warn.append("Can't get instance runtime information: %s" %
                           instance_info.fail_msg)
        else:
          if instance_info.payload:
            current_mem = int(instance_info.payload["memory"])
          else:
            # Assume instance not running
            # (there is a slight race condition here, but it's not very
            # probable, and we have no other way to check)
            # TODO: Describe race condition
            current_mem = 0
          #TODO(dynmem): do the appropriate check involving MINMEM
          miss_mem = (be_new[constants.BE_MAXMEM] - current_mem -
                      pnhvinfo["memory_free"])
          if miss_mem > 0:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from starting, due to %d MB of memory"
                                       " missing on its primary node" %
                                       miss_mem, errors.ECODE_NORES)

      if be_new[constants.BE_AUTO_BALANCE]:
        for node_uuid, nres in nodeinfo.items():
          if node_uuid not in self.instance.secondary_nodes:
            continue
          nres.Raise("Can't get info from secondary node %s" %
                     self.cfg.GetNodeName(node_uuid), prereq=True,
                     ecode=errors.ECODE_STATE)
          (_, _, (nhvinfo, )) = nres.payload
          if not isinstance(nhvinfo.get("memory_free", None), int):
            raise errors.OpPrereqError("Secondary node %s didn't return free"
                                       " memory information" %
                                       self.cfg.GetNodeName(node_uuid),
                                       errors.ECODE_STATE)
          #TODO(dynmem): do the appropriate check involving MINMEM
          elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from failover to its secondary node"
                                       " %s, due to not enough memory" %
                                       self.cfg.GetNodeName(node_uuid),
                                       errors.ECODE_STATE)

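    # Ballooning the runtime memory requires a running instance and, unless
    # --force is given, a value within the proposed minimum/maximum memory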
    if self.op.runtime_mem:
      remote_info = self.rpc.call_instance_info(
         self.instance.primary_node, self.instance.name,
         self.instance.hypervisor,
         self.cluster.hvparams[self.instance.hypervisor])
      remote_info.Raise("Error checking node %s" %
                        self.cfg.GetNodeName(self.instance.primary_node))
      if not remote_info.payload: # not running already
        raise errors.OpPrereqError("Instance %s is not running" %
                                   self.instance.name, errors.ECODE_STATE)

      current_memory = remote_info.payload["memory"]
      if (not self.op.force and
           (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
            self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
        raise errors.OpPrereqError("Instance %s must have memory between %d"
                                   " and %d MB of memory unless --force is"
                                   " given" %
                                   (self.instance.name,
                                    self.be_proposed[constants.BE_MINMEM],
                                    self.be_proposed[constants.BE_MAXMEM]),
                                   errors.ECODE_INVAL)

      delta = self.op.runtime_mem - current_memory
      if delta > 0:
        CheckNodeFreeMemory(
            self, self.instance.primary_node,
            "ballooning memory for instance %s" % self.instance.name, delta,
            self.instance.hypervisor,
            self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])

    # make self.cluster visible in the functions below
    cluster = self.cluster

    def _PrepareNicCreate(_, params, private):
      self._PrepareNicModification(params, private, None, None,
                                   {}, cluster, pnode_uuid)
      return (None, None)

    def _PrepareNicMod(_, nic, params, private):
      self._PrepareNicModification(params, private, nic.ip, nic.network,
                                   nic.nicparams, cluster, pnode_uuid)
      return None

    def _PrepareNicRemove(_, params, __):
      ip = params.ip
      net = params.network
      if net is not None and ip is not None:
        self.cfg.ReleaseIp(net, ip, self.proc.GetECId())

    # Verify NIC changes (operating on copy)
    nics = self.instance.nics[:]
    _ApplyContainerMods("NIC", nics, None, self.nicmod,
                        _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
    if len(nics) > constants.MAX_NICS:
      raise errors.OpPrereqError("Instance has too many network interfaces"
                                 " (%d), cannot add more" % constants.MAX_NICS,
                                 errors.ECODE_STATE)

    # Pre-compute NIC changes (necessary to use result in hooks)
    self._nic_chgdesc = []
    if self.nicmod:
      # Operate on copies as this is still in prereq
      nics = [nic.Copy() for nic in self.instance.nics]
      _ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
                          self._CreateNewNic, self._ApplyNicMods, None)
      # Verify that NIC names are unique and valid
      utils.ValidateDeviceNames("NIC", nics)
      self._new_nics = nics
      ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
    else:
      self._new_nics = None
      ispec[constants.ISPEC_NIC_COUNT] = len(self.instance.nics)

    if not self.op.ignore_ipolicy:
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
                                                              group_info)

      # Fill ispec with backend parameters
      ispec[constants.ISPEC_SPINDLE_USE] = \
        self.be_new.get(constants.BE_SPINDLE_USE, None)
      ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS,
                                                         None)

      # Copy ispec to verify parameters with min/max values separately
      if self.op.disk_template:
        new_disk_template = self.op.disk_template
      else:
        new_disk_template = self.instance.disk_template
      ispec_max = ispec.copy()
      ispec_max[constants.ISPEC_MEM_SIZE] = \
        self.be_new.get(constants.BE_MAXMEM, None)
      res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,
                                                     new_disk_template)
      ispec_min = ispec.copy()
      ispec_min[constants.ISPEC_MEM_SIZE] = \
        self.be_new.get(constants.BE_MINMEM, None)
      res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,
                                                     new_disk_template)

      if (res_max or res_min):
        # FIXME: Improve error message by including information about whether
        # the upper or lower limit of the parameter fails the ipolicy.
        msg = ("Instance allocation to group %s (%s) violates policy: %s" %
               (group_info, group_info.name,
                utils.CommaJoin(set(res_max + res_min))))
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

  def _ConvertPlainToDrbd(self, feedback_fn):
    """Converts an instance from plain to drbd.

    """
    feedback_fn("Converting template to drbd")
    pnode_uuid = self.instance.primary_node
    snode_uuid = self.op.remote_node_uuid

    assert self.instance.disk_template == constants.DT_PLAIN

    # create a fake disk info for _GenerateDiskTemplate
    disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
                  constants.IDISK_VG: d.logical_id[0],
                  constants.IDISK_NAME: d.name}
                 for d in self.instance.disks]
    new_disks = GenerateDiskTemplate(self, self.op.disk_template,
                                     self.instance.uuid, pnode_uuid,
                                     [snode_uuid], disk_info, None, None, 0,
                                     feedback_fn, self.diskparams)
    anno_disks = rpc.AnnotateDiskParams(constants.DT_DRBD8, new_disks,
                                        self.diskparams)
    p_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, pnode_uuid)
    s_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, snode_uuid)
    info = GetInstanceInfoText(self.instance)
    feedback_fn("Creating additional volumes...")
    # first, create the missing data and meta devices
    for disk in anno_disks:
      # unfortunately this is... not too nice
      CreateSingleBlockDev(self, pnode_uuid, self.instance, disk.children[1],
                           info, True, p_excl_stor)
      for child in disk.children:
        CreateSingleBlockDev(self, snode_uuid, self.instance, child, info, True,
                             s_excl_stor)
    # at this stage, all new LVs have been created, we can rename the
    # old ones
    feedback_fn("Renaming original volumes...")
    rename_list = [(o, n.children[0].logical_id)
                   for (o, n) in zip(self.instance.disks, new_disks)]
    result = self.rpc.call_blockdev_rename(pnode_uuid, rename_list)
    result.Raise("Failed to rename original LVs")

    feedback_fn("Initializing DRBD devices...")
    # all child devices are in place, we can now create the DRBD devices
    try:
      for disk in anno_disks:
        for (node_uuid, excl_stor) in [(pnode_uuid, p_excl_stor),
                                       (snode_uuid, s_excl_stor)]:
          f_create = node_uuid == pnode_uuid
          CreateSingleBlockDev(self, node_uuid, self.instance, disk, info,
                               f_create, excl_stor)
    except errors.GenericError, e:
      feedback_fn("Initializing of DRBD devices failed;"
                  " renaming back original volumes...")
      for disk in new_disks:
        self.cfg.SetDiskID(disk, pnode_uuid)
      rename_back_list = [(n.children[0], o.logical_id)
                          for (n, o) in zip(new_disks, self.instance.disks)]
      result = self.rpc.call_blockdev_rename(pnode_uuid, rename_back_list)
      result.Raise("Failed to rename LVs back after error %s" % str(e))
      raise

    # at this point, the instance has been modified
    self.instance.disk_template = constants.DT_DRBD8
    self.instance.disks = new_disks
    self.cfg.Update(self.instance, feedback_fn)

    # Release node locks while waiting for sync
    ReleaseLocks(self, locking.LEVEL_NODE)

    # disks are created, waiting for sync
    disk_abort = not WaitForSync(self, self.instance,
                                 oneshot=not self.op.wait_for_sync)
    if disk_abort:
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance, please cleanup manually")

    # Node resource locks will be released by caller

  def _ConvertDrbdToPlain(self, feedback_fn):
    """Converts an instance from drbd to plain.

    """
    assert len(self.instance.secondary_nodes) == 1
    assert self.instance.disk_template == constants.DT_DRBD8

    pnode_uuid = self.instance.primary_node
    snode_uuid = self.instance.secondary_nodes[0]
    feedback_fn("Converting template to plain")

    old_disks = AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
    new_disks = [d.children[0] for d in self.instance.disks]

    # copy over size, mode and name
    for parent, child in zip(old_disks, new_disks):
      child.size = parent.size
      child.mode = parent.mode
      child.name = parent.name

    # this is a DRBD disk, return its port to the pool
    # NOTE: this must be done right before the call to cfg.Update!
    for disk in old_disks:
      tcp_port = disk.logical_id[2]
      self.cfg.AddTcpUdpPort(tcp_port)

    # update instance structure
    self.instance.disks = new_disks
    self.instance.disk_template = constants.DT_PLAIN
    _UpdateIvNames(0, self.instance.disks)
    self.cfg.Update(self.instance, feedback_fn)

    # Release locks in case removing disks takes a while
    ReleaseLocks(self, locking.LEVEL_NODE)

    feedback_fn("Removing volumes on the secondary node...")
    for disk in old_disks:
      self.cfg.SetDiskID(disk, snode_uuid)
      msg = self.rpc.call_blockdev_remove(snode_uuid, disk).fail_msg
      if msg:
        self.LogWarning("Could not remove block device %s on node %s,"
                        " continuing anyway: %s", disk.iv_name,
                        self.cfg.GetNodeName(snode_uuid), msg)

    feedback_fn("Removing unneeded volumes on the primary node...")
    for idx, disk in enumerate(old_disks):
      meta = disk.children[1]
      self.cfg.SetDiskID(meta, pnode_uuid)
      msg = self.rpc.call_blockdev_remove(pnode_uuid, meta).fail_msg
      if msg:
        self.LogWarning("Could not remove metadata for disk %d on node %s,"
                        " continuing anyway: %s", idx,
                        self.cfg.GetNodeName(pnode_uuid), msg)

  def _CreateNewDisk(self, idx, params, _):
    """Creates a new disk.

    """
    # add a new disk
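    # File-based disks are created next to the instance's existing first disk;
    # other templates need neither a file driver nor a path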
    if self.instance.disk_template in constants.DTS_FILEBASED:
      (file_driver, file_path) = self.instance.disks[0].logical_id
      file_path = os.path.dirname(file_path)
    else:
      file_driver = file_path = None

    disk = \
      GenerateDiskTemplate(self, self.instance.disk_template,
                           self.instance.uuid, self.instance.primary_node,
                           self.instance.secondary_nodes, [params], file_path,
                           file_driver, idx, self.Log, self.diskparams)[0]

    new_disks = CreateDisks(self, self.instance, disks=[disk])

    if self.cluster.prealloc_wipe_disks:
      # Wipe new disk
      WipeOrCleanupDisks(self, self.instance,
                         disks=[(idx, disk, 0)],
                         cleanup=new_disks)

    return (disk, [
      ("disk/%d" % idx, "add:size=%s,mode=%s" % (disk.size, disk.mode)),
      ])

  @staticmethod
  def _ModifyDisk(idx, disk, params, _):
    """Modifies a disk.

    """
    changes = []
    mode = params.get(constants.IDISK_MODE, None)
    if mode:
      disk.mode = mode
      changes.append(("disk.mode/%d" % idx, disk.mode))

    name = params.get(constants.IDISK_NAME, None)
    disk.name = name
    changes.append(("disk.name/%d" % idx, disk.name))

    return changes

  def _RemoveDisk(self, idx, root, _):
    """Removes a disk.

    """
    (anno_disk,) = AnnotateDiskParams(self.instance, [root], self.cfg)
    for node_uuid, disk in anno_disk.ComputeNodeTree(
                             self.instance.primary_node):
      self.cfg.SetDiskID(disk, node_uuid)
      msg = self.rpc.call_blockdev_remove(node_uuid, disk).fail_msg
      if msg:
        self.LogWarning("Could not remove disk/%d on node '%s': %s,"
                        " continuing anyway", idx,
                        self.cfg.GetNodeName(node_uuid), msg)

    # if this is a DRBD disk, return its port to the pool
    if root.dev_type in constants.LDS_DRBD:
      self.cfg.AddTcpUdpPort(root.logical_id[2])

  def _CreateNewNic(self, idx, params, private):
    """Creates data structure for a new network interface.

    """
    mac = params[constants.INIC_MAC]
    ip = params.get(constants.INIC_IP, None)
    net = params.get(constants.INIC_NETWORK, None)
    name = params.get(constants.INIC_NAME, None)
    net_uuid = self.cfg.LookupNetwork(net)
    #TODO: not private.filled?? can a nic have no nicparams??
    nicparams = private.filled
    nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name,
                       nicparams=nicparams)
    nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())

    return (nobj, [
      ("nic.%d" % idx,
       "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
       (mac, ip, private.filled[constants.NIC_MODE],
       private.filled[constants.NIC_LINK],
       net)),
      ])

  def _ApplyNicMods(self, idx, nic, params, private):
    """Modifies a network interface.

    """
    changes = []

    for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:
      if key in params:
        changes.append(("nic.%s/%d" % (key, idx), params[key]))
        setattr(nic, key, params[key])

    new_net = params.get(constants.INIC_NETWORK, nic.network)
    new_net_uuid = self.cfg.LookupNetwork(new_net)
    if new_net_uuid != nic.network:
      changes.append(("nic.network/%d" % idx, new_net))
      nic.network = new_net_uuid

    if private.filled:
      nic.nicparams = private.filled

      for (key, val) in nic.nicparams.items():
        changes.append(("nic.%s/%d" % (key, idx), val))

    return changes

  def Exec(self, feedback_fn):
    """Modifies an instance.

    All parameters take effect only at the next restart of the instance.

    """
    # Process here the warnings from CheckPrereq, as we don't have a
    # feedback_fn there.
    # TODO: Replace with self.LogWarning
    for warn in self.warn:
      feedback_fn("WARNING: %s" % warn)

    assert ((self.op.disk_template is None) ^
            bool(self.owned_locks(locking.LEVEL_NODE_RES))), \
      "Not owning any node resource locks"

    result = []

    # New primary node
    if self.op.pnode_uuid:
      self.instance.primary_node = self.op.pnode_uuid

    # runtime memory
    if self.op.runtime_mem:
      rpcres = self.rpc.call_instance_balloon_memory(self.instance.primary_node,
                                                     self.instance,
                                                     self.op.runtime_mem)
      rpcres.Raise("Cannot modify instance runtime memory")
      result.append(("runtime_memory", self.op.runtime_mem))

    # Apply disk changes
    _ApplyContainerMods("disk", self.instance.disks, result, self.diskmod,
                        self._CreateNewDisk, self._ModifyDisk,
                        self._RemoveDisk)
    _UpdateIvNames(0, self.instance.disks)

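    # Disk template conversion: the instance's disks must be shut down first;
    # if the conversion fails, any DRBD minors reserved for it are released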
    if self.op.disk_template:
      if __debug__:
        check_nodes = set(self.instance.all_nodes)
        if self.op.remote_node_uuid:
          check_nodes.add(self.op.remote_node_uuid)
        for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
          owned = self.owned_locks(level)
          assert not (check_nodes - owned), \
            ("Not owning the correct locks, owning %r, expected at least %r" %
             (owned, check_nodes))

      r_shut = ShutdownInstanceDisks(self, self.instance)
      if not r_shut:
        raise errors.OpExecError("Cannot shutdown instance disks, unable to"
                                 " proceed with disk template conversion")
      mode = (self.instance.disk_template, self.op.disk_template)
      try:
        self._DISK_CONVERSIONS[mode](self, feedback_fn)
      except:
        self.cfg.ReleaseDRBDMinors(self.instance.uuid)
        raise
      result.append(("disk_template", self.op.disk_template))

      assert self.instance.disk_template == self.op.disk_template, \
        ("Expected disk template '%s', found '%s'" %
         (self.op.disk_template, self.instance.disk_template))

    # Release node and resource locks if there are any (they might already have
    # been released during disk conversion)
    ReleaseLocks(self, locking.LEVEL_NODE)
    ReleaseLocks(self, locking.LEVEL_NODE_RES)

    # Apply NIC changes
    if self._new_nics is not None:
      self.instance.nics = self._new_nics
      result.extend(self._nic_chgdesc)

    # hvparams changes
    if self.op.hvparams:
      self.instance.hvparams = self.hv_inst
      for key, val in self.op.hvparams.iteritems():
        result.append(("hv/%s" % key, val))

    # beparams changes
    if self.op.beparams:
      self.instance.beparams = self.be_inst
      for key, val in self.op.beparams.iteritems():
        result.append(("be/%s" % key, val))

    # OS change
    if self.op.os_name:
      self.instance.os = self.op.os_name

    # osparams changes
    if self.op.osparams:
      self.instance.osparams = self.os_inst
      for key, val in self.op.osparams.iteritems():
        result.append(("os/%s" % key, val))

    if self.op.offline is None:
      # Ignore
      pass
    elif self.op.offline:
      # Mark instance as offline
      self.cfg.MarkInstanceOffline(self.instance.uuid)
      result.append(("admin_state", constants.ADMINST_OFFLINE))
    else:
      # Mark instance as online, but stopped
      self.cfg.MarkInstanceDown(self.instance.uuid)
      result.append(("admin_state", constants.ADMINST_DOWN))

    self.cfg.Update(self.instance, feedback_fn, self.proc.GetECId())

    assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
                self.owned_locks(locking.LEVEL_NODE)), \
      "All node locks should have been released by now"

    return result

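  # Supported disk template conversions, mapping (old template, new template)
  # to the method implementing the conversion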
  _DISK_CONVERSIONS = {
    (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
    (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
    }


class LUInstanceChangeGroup(LogicalUnit):
  HPATH = "instance-change-group"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self.share_locks = ShareAll()

    self.needed_locks = {
      locking.LEVEL_NODEGROUP: [],
      locking.LEVEL_NODE: [],
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
      }

    self._ExpandAndLockInstance()

    if self.op.target_groups:
      self.req_target_uuids = map(self.cfg.LookupNodeGroup,
                                  self.op.target_groups)
    else:
      self.req_target_uuids = None

    self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]

      if self.req_target_uuids:
        lock_groups = set(self.req_target_uuids)

        # Lock all groups used by instance optimistically; this requires going
        # via the node before it's locked, requiring verification later on
        instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
        lock_groups.update(instance_groups)
      else:
        # No target groups, need to lock all of them
        lock_groups = locking.ALL_SET

      self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups

    elif level == locking.LEVEL_NODE:
      if self.req_target_uuids:
        # Lock all nodes used by instances
        self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
        self._LockInstancesNodes()

        # Lock all nodes in all potential target groups
        lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
                       self.cfg.GetInstanceNodeGroups(self.op.instance_uuid))
        member_nodes = [node_uuid
                        for group in lock_groups
                        for node_uuid in self.cfg.GetNodeGroup(group).members]
        self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
      else:
        # Lock all nodes as all groups are potential targets
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    owned_instance_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))

    assert (self.req_target_uuids is None or
            owned_groups.issuperset(self.req_target_uuids))
    assert owned_instance_names == set([self.op.instance_name])

    # Get instance information
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)

    # Check if node groups for locked instance are still correct
    assert owned_nodes.issuperset(self.instance.all_nodes), \
      ("Instance %s's nodes changed while we kept the lock" %
       self.op.instance_name)

    inst_groups = CheckInstanceNodeGroups(self.cfg, self.op.instance_uuid,
                                          owned_groups)

    if self.req_target_uuids:
      # User requested specific target groups
      self.target_uuids = frozenset(self.req_target_uuids)
    else:
      # All groups except those used by the instance are potential targets
      self.target_uuids = owned_groups - inst_groups

    conflicting_groups = self.target_uuids & inst_groups
    if conflicting_groups:
      raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
                                 " used by the instance '%s'" %
                                 (utils.CommaJoin(conflicting_groups),
                                  self.op.instance_name),
                                 errors.ECODE_INVAL)

    if not self.target_uuids:
      raise errors.OpPrereqError("There are no possible target groups",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    assert self.target_uuids

    env = {
      "TARGET_GROUPS": " ".join(self.target_uuids),
      }

    env.update(BuildInstanceHookEnvByObject(self, self.instance))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()
    return ([mn], [mn])

  def Exec(self, feedback_fn):
    instances = list(self.owned_locks(locking.LEVEL_INSTANCE))

    assert instances == [self.op.instance_name], "Instance not locked"

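    # Ask the instance allocator for the jobs needed to move the instance to
    # one of the target groups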
    req = iallocator.IAReqGroupChange(instances=instances,
                                      target_groups=list(self.target_uuids))
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute solution for changing group of"
                                 " instance '%s' using iallocator '%s': %s" %
                                 (self.op.instance_name, self.op.iallocator,
                                  ial.info), errors.ECODE_NORES)

    jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False)

    self.LogInfo("Iallocator returned %s job(s) for changing group of"
                 " instance '%s'", len(jobs), self.op.instance_name)

    return ResultWithJobs(jobs)