#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Logical units dealing with instances."""

import OpenSSL
import copy
import logging
import os

from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import ht
from ganeti import hypervisor
from ganeti import locking
from ganeti.masterd import iallocator
from ganeti import masterd
from ganeti import netutils
from ganeti import objects
from ganeti import opcodes
from ganeti import pathutils
from ganeti import rpc
from ganeti import utils

from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs

from ganeti.cmdlib.common import INSTANCE_DOWN, \
  INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, CheckNodeOnline, \
  ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
  LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
  IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
  AnnotateDiskParams, GetUpdatedParams, ExpandInstanceUuidAndName, \
  ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeUuidAndName, \
  CheckDiskTemplateEnabled
from ganeti.cmdlib.instance_storage import CreateDisks, \
  CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, WaitForSync, \
  IsExclusiveStorageEnabledNodeUuid, CreateSingleBlockDev, ComputeDisks, \
  CheckRADOSFreeSpace, ComputeDiskSizePerVG, GenerateDiskTemplate, \
  StartInstanceDisks, ShutdownInstanceDisks, AssembleInstanceDisks, \
  CheckSpindlesExclusiveStorage
from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
  GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
  NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
  ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
  GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
  CheckInstanceBridgesExist, CheckNicsBridgesExist, CheckNodeHasOS

import ganeti.masterd.instance


#: Type description for changes as returned by L{_ApplyContainerMods}'s
#: callbacks
_TApplyContModsCbChanges = \
  ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
    ht.TNonEmptyString,
    ht.TAny,
    ])))


def _CheckHostnameSane(lu, name):
  """Ensures that a given hostname resolves to a 'sane' name.

  The given name is required to be a prefix of the resolved hostname,
  to prevent accidental mismatches.

  @param lu: the logical unit on behalf of which we're checking
  @param name: the name we should resolve and check
  @return: the resolved hostname object

  """
  hostname = netutils.GetHostname(name=name)
  if hostname.name != name:
    lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name)
  if not utils.MatchNameComponent(name, [hostname.name]):
    raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
                                " same as given hostname '%s'") %
                               (hostname.name, name), errors.ECODE_INVAL)
  return hostname


def _CheckOpportunisticLocking(op):
  """Generate error if opportunistic locking is not possible.

  """
  if op.opportunistic_locking and not op.iallocator:
    raise errors.OpPrereqError("Opportunistic locking is only available in"
                               " combination with an instance allocator",
                               errors.ECODE_INVAL)


def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_name_whitelist):
  """Wrapper around IAReqInstanceAlloc.

  @param op: The instance opcode
  @param disks: The computed disks
  @param nics: The computed nics
  @param beparams: The fully filled beparams
  @param node_name_whitelist: List of nodes which should appear as online to the
    allocator (unless the node is already marked offline)

  @returns: A filled L{iallocator.IAReqInstanceAlloc}

  """
  spindle_use = beparams[constants.BE_SPINDLE_USE]
  return iallocator.IAReqInstanceAlloc(name=op.instance_name,
                                       disk_template=op.disk_template,
                                       tags=op.tags,
                                       os=op.os_type,
                                       vcpus=beparams[constants.BE_VCPUS],
                                       memory=beparams[constants.BE_MAXMEM],
                                       spindle_use=spindle_use,
                                       disks=disks,
                                       nics=[n.ToDict() for n in nics],
                                       hypervisor=op.hypervisor,
                                       node_whitelist=node_name_whitelist)


def _ComputeFullBeParams(op, cluster):
  """Computes the full beparams.

  @param op: The instance opcode
  @param cluster: The cluster config object

  @return: The fully filled beparams

  """
  default_beparams = cluster.beparams[constants.PP_DEFAULT]
  for param, value in op.beparams.iteritems():
    if value == constants.VALUE_AUTO:
      op.beparams[param] = default_beparams[param]
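  # Bring legacy backend parameters (e.g. the old single 'memory' setting)
  # up to the current format before enforcing the parameter types below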
  objects.UpgradeBeParams(op.beparams)
  utils.ForceDictType(op.beparams, constants.BES_PARAMETER_TYPES)
  return cluster.SimpleFillBE(op.beparams)


def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
  """Computes the nics.

  @param op: The instance opcode
  @param cluster: Cluster configuration object
  @param default_ip: The default ip to assign
  @param cfg: An instance of the configuration object
  @param ec_id: Execution context ID

  @returns: The built up nics

  """
  nics = []
  for nic in op.nics:
    nic_mode_req = nic.get(constants.INIC_MODE, None)
    nic_mode = nic_mode_req
    if nic_mode is None or nic_mode == constants.VALUE_AUTO:
      nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]

    net = nic.get(constants.INIC_NETWORK, None)
    link = nic.get(constants.NIC_LINK, None)
    ip = nic.get(constants.INIC_IP, None)

    if net is None or net.lower() == constants.VALUE_NONE:
      net = None
    else:
      if nic_mode_req is not None or link is not None:
        raise errors.OpPrereqError("If network is given, no mode or link"
                                   " is allowed to be passed",
                                   errors.ECODE_INVAL)

    # ip validity checks
    if ip is None or ip.lower() == constants.VALUE_NONE:
      nic_ip = None
    elif ip.lower() == constants.VALUE_AUTO:
      if not op.name_check:
        raise errors.OpPrereqError("IP address set to auto but name checks"
                                   " have been skipped",
                                   errors.ECODE_INVAL)
      nic_ip = default_ip
    else:
      # We defer pool operations until later, so that the iallocator has
      # filled in the instance's node(s)
      if ip.lower() == constants.NIC_IP_POOL:
        if net is None:
          raise errors.OpPrereqError("if ip=pool, parameter network"
                                     " must be passed too",
                                     errors.ECODE_INVAL)

      elif not netutils.IPAddress.IsValid(ip):
        raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                   errors.ECODE_INVAL)

      nic_ip = ip

    # TODO: check the ip address for uniqueness
    if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
      raise errors.OpPrereqError("Routed nic mode requires an ip address",
                                 errors.ECODE_INVAL)

    # MAC address verification
    mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
    if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
      mac = utils.NormalizeAndValidateMac(mac)

      try:
        # TODO: We need to factor this out
        cfg.ReserveMAC(mac, ec_id)
      except errors.ReservationError:
        raise errors.OpPrereqError("MAC address %s already in use"
                                   " in cluster" % mac,
                                   errors.ECODE_NOTUNIQUE)

    #  Build nic parameters
    nicparams = {}
    if nic_mode_req:
      nicparams[constants.NIC_MODE] = nic_mode
    if link:
      nicparams[constants.NIC_LINK] = link

    check_params = cluster.SimpleFillNIC(nicparams)
    objects.NIC.CheckParameterSyntax(check_params)
    net_uuid = cfg.LookupNetwork(net)
    name = nic.get(constants.INIC_NAME, None)
    if name is not None and name.lower() == constants.VALUE_NONE:
      name = None
    nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name,
                          network=net_uuid, nicparams=nicparams)
    nic_obj.uuid = cfg.GenerateUniqueID(ec_id)
    nics.append(nic_obj)

  return nics


def _CheckForConflictingIp(lu, ip, node_uuid):
  """In case of conflicting IP address raise error.

  @type ip: string
  @param ip: IP address
  @type node_uuid: string
  @param node_uuid: node UUID

  """
  (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node_uuid)
  if conf_net is not None:
    raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
                                " network %s, but the target NIC does not." %
                                (ip, conf_net)),
                               errors.ECODE_STATE)

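  # No conflicting network was found for this IP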
  return (None, None)


def _ComputeIPolicyInstanceSpecViolation(
  ipolicy, instance_spec, disk_template,
  _compute_fn=ComputeIPolicySpecViolation):
  """Compute if instance specs meet the specs of ipolicy.

  @type ipolicy: dict
  @param ipolicy: The ipolicy to verify against
  @type instance_spec: dict
  @param instance_spec: The instance spec to verify
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{ComputeIPolicySpecViolation}

  """
  mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
  cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
  disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
  disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
  nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
  spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)

  return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
                     disk_sizes, spindle_use, disk_template)


def _CheckOSVariant(os_obj, name):
  """Check whether an OS name conforms to the os variants specification.

  @type os_obj: L{objects.OS}
  @param os_obj: OS object to check
  @type name: string
  @param name: OS name passed by the user, to check for validity

  """
  variant = objects.OS.GetVariant(name)
  if not os_obj.supported_variants:
    if variant:
      raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
                                 " passed)" % (os_obj.name, variant),
                                 errors.ECODE_INVAL)
    return
  if not variant:
    raise errors.OpPrereqError("OS name must include a variant",
                               errors.ECODE_INVAL)

  if variant not in os_obj.supported_variants:
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)


class LUInstanceCreate(LogicalUnit):
  """Create an instance.

  """
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def _CheckDiskTemplateValid(self):
    """Checks validity of disk template.

    """
    cluster = self.cfg.GetClusterInfo()
    if self.op.disk_template is None:
      # FIXME: It would be better to take the default disk template from the
      # ipolicy, but for the ipolicy we need the primary node, which we get from
      # the iallocator, which wants the disk template as input. To solve this
      # chicken-and-egg problem, it should be possible to specify just a node
      # group from the iallocator and take the ipolicy from that.
      self.op.disk_template = cluster.enabled_disk_templates[0]
    CheckDiskTemplateEnabled(cluster, self.op.disk_template)

  def _CheckDiskArguments(self):
    """Checks validity of disk-related arguments.

    """
    # check that disk's names are unique and valid
    utils.ValidateDeviceNames("disk", self.op.disks)

    self._CheckDiskTemplateValid()

    # check disks. parameter names and consistent adopt/no-adopt strategy
    has_adopt = has_no_adopt = False
    for disk in self.op.disks:
      if self.op.disk_template != constants.DT_EXT:
        utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
      if constants.IDISK_ADOPT in disk:
        has_adopt = True
      else:
        has_no_adopt = True
    if has_adopt and has_no_adopt:
      raise errors.OpPrereqError("Either all disks are adopted or none is",
                                 errors.ECODE_INVAL)
    if has_adopt:
      if self.op.disk_template not in constants.DTS_MAY_ADOPT:
        raise errors.OpPrereqError("Disk adoption is not supported for the"
                                   " '%s' disk template" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)
      if self.op.iallocator is not None:
        raise errors.OpPrereqError("Disk adoption not allowed with an"
                                   " iallocator script", errors.ECODE_INVAL)
      if self.op.mode == constants.INSTANCE_IMPORT:
        raise errors.OpPrereqError("Disk adoption not allowed for"
                                   " instance import", errors.ECODE_INVAL)
    else:
      if self.op.disk_template in constants.DTS_MUST_ADOPT:
        raise errors.OpPrereqError("Disk template %s requires disk adoption,"
                                   " but no 'adopt' parameter given" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)

    self.adopt_disks = has_adopt

  def CheckArguments(self):
    """Check arguments.

    """
    # do not require name_check to ease forward/backward compatibility
    # for tools
    if self.op.no_install and self.op.start:
      self.LogInfo("No-installation mode selected, disabling startup")
      self.op.start = False
    # validate/normalize the instance name
    self.op.instance_name = \
      netutils.Hostname.GetNormalizedName(self.op.instance_name)

    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("Cannot do IP address check without a name"
                                 " check", errors.ECODE_INVAL)

    # check nics' parameter names
    for nic in self.op.nics:
      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
    # check that NIC's parameters names are unique and valid
    utils.ValidateDeviceNames("NIC", self.op.nics)

    self._CheckDiskArguments()

    # instance name verification
    if self.op.name_check:
      self.hostname = _CheckHostnameSane(self, self.op.instance_name)
      self.op.instance_name = self.hostname.name
      # used in CheckPrereq for ip ping check
      self.check_ip = self.hostname.ip
    else:
      self.check_ip = None

    # file storage checks
    if (self.op.file_driver and
        not self.op.file_driver in constants.FILE_DRIVER):
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                 self.op.file_driver, errors.ECODE_INVAL)

    ### Node/iallocator related checks
    CheckIAllocatorOrNode(self, "iallocator", "pnode")

    if self.op.pnode is not None:
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        if self.op.snode is None:
          raise errors.OpPrereqError("The networked disk templates need"
                                     " a mirror node", errors.ECODE_INVAL)
      elif self.op.snode:
        self.LogWarning("Secondary node will be ignored on non-mirrored disk"
                        " template")
        self.op.snode = None

    _CheckOpportunisticLocking(self.op)

    self._cds = GetClusterDomainSecret()

    if self.op.mode == constants.INSTANCE_IMPORT:
      # On import force_variant must be True, because if we forced it at
      # initial install, our only chance when importing it back is that it
      # works again!
      self.op.force_variant = True

      if self.op.no_install:
        self.LogInfo("No-installation mode has no effect during import")

    elif self.op.mode == constants.INSTANCE_CREATE:
      if self.op.os_type is None:
        raise errors.OpPrereqError("No guest OS specified",
                                   errors.ECODE_INVAL)
      if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
        raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
                                   " installation" % self.op.os_type,
                                   errors.ECODE_STATE)
      if self.op.disk_template is None:
        raise errors.OpPrereqError("No disk template specified",
                                   errors.ECODE_INVAL)

    elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
      # Check handshake to ensure both clusters have the same domain secret
      src_handshake = self.op.source_handshake
      if not src_handshake:
        raise errors.OpPrereqError("Missing source handshake",
                                   errors.ECODE_INVAL)

      errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
                                                           src_handshake)
      if errmsg:
        raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
                                   errors.ECODE_INVAL)

      # Load and check source CA
      self.source_x509_ca_pem = self.op.source_x509_ca
      if not self.source_x509_ca_pem:
        raise errors.OpPrereqError("Missing source X509 CA",
                                   errors.ECODE_INVAL)

      try:
        (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
                                                    self._cds)
      except OpenSSL.crypto.Error, err:
        raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
                                   (err, ), errors.ECODE_INVAL)

      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
      if errcode is not None:
        raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
                                   errors.ECODE_INVAL)

      self.source_x509_ca = cert

      src_instance_name = self.op.source_instance_name
      if not src_instance_name:
        raise errors.OpPrereqError("Missing source instance name",
                                   errors.ECODE_INVAL)

      self.source_instance_name = \
        netutils.GetHostname(name=src_instance_name).name

    else:
      raise errors.OpPrereqError("Invalid instance creation mode %r" %
                                 self.op.mode, errors.ECODE_INVAL)

  def ExpandNames(self):
    """ExpandNames for CreateInstance.

    Figure out the right locks for instance creation.

    """
    self.needed_locks = {}

    # this is just a preventive check, but someone might still add this
    # instance in the meantime, and creation will fail at lock-add time
    if self.op.instance_name in\
      [inst.name for inst in self.cfg.GetAllInstancesInfo().values()]:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 self.op.instance_name, errors.ECODE_EXISTS)

    self.add_locks[locking.LEVEL_INSTANCE] = self.op.instance_name

    if self.op.iallocator:
      # TODO: Find a solution to not lock all nodes in the cluster, e.g. by
      # specifying a group on instance creation and then selecting nodes from
      # that group
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
      self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET

      if self.op.opportunistic_locking:
        self.opportunistic_locks[locking.LEVEL_NODE] = True
        self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
    else:
      (self.op.pnode_uuid, self.op.pnode) = \
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
      nodelist = [self.op.pnode_uuid]
      if self.op.snode is not None:
        (self.op.snode_uuid, self.op.snode) = \
          ExpandNodeUuidAndName(self.cfg, self.op.snode_uuid, self.op.snode)
        nodelist.append(self.op.snode_uuid)
      self.needed_locks[locking.LEVEL_NODE] = nodelist

    # in case of import lock the source node too
    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = self.op.src_node
      src_path = self.op.src_path

      if src_path is None:
        self.op.src_path = src_path = self.op.instance_name

      if src_node is None:
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
        self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
        self.op.src_node = None
        if os.path.isabs(src_path):
          raise errors.OpPrereqError("Importing an instance from a path"
                                     " requires a source node option",
                                     errors.ECODE_INVAL)
      else:
        (self.op.src_node_uuid, self.op.src_node) = (_, src_node) = \
          ExpandNodeUuidAndName(self.cfg, self.op.src_node_uuid, src_node)
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
          self.needed_locks[locking.LEVEL_NODE].append(self.op.src_node_uuid)
        if not os.path.isabs(src_path):
          self.op.src_path = src_path = \
            utils.PathJoin(pathutils.EXPORT_DIR, src_path)

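    # Node resource locks are taken for the same nodes as the regular node
    # locks; later code asserts that the two sets stay in sync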
    self.needed_locks[locking.LEVEL_NODE_RES] = \
      CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def _RunAllocator(self):
    """Run the allocator based on input opcode.

    """
    if self.op.opportunistic_locking:
      # Only consider nodes for which a lock is held
      node_name_whitelist = self.cfg.GetNodeNames(
        self.owned_locks(locking.LEVEL_NODE))
    else:
      node_name_whitelist = None

    #TODO Export network to iallocator so that it chooses a pnode
    #     in a nodegroup that has the desired network connected to
    req = _CreateInstanceAllocRequest(self.op, self.disks,
                                      self.nics, self.be_full,
                                      node_name_whitelist)
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      # When opportunistic locks are used only a temporary failure is generated
      if self.op.opportunistic_locking:
        ecode = errors.ECODE_TEMP_NORES
      else:
        ecode = errors.ECODE_NORES

      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" %
                                 (self.op.iallocator, ial.info),
                                 ecode)

    (self.op.pnode_uuid, self.op.pnode) = \
      ExpandNodeUuidAndName(self.cfg, None, ial.result[0])
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                 self.op.instance_name, self.op.iallocator,
                 utils.CommaJoin(ial.result))

    assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator"

    if req.RequiredNodes() == 2:
      (self.op.snode_uuid, self.op.snode) = \
        ExpandNodeUuidAndName(self.cfg, None, ial.result[1])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "ADD_MODE": self.op.mode,
      }
    if self.op.mode == constants.INSTANCE_IMPORT:
      env["SRC_NODE"] = self.op.src_node
      env["SRC_PATH"] = self.op.src_path
      env["SRC_IMAGES"] = self.src_images

    env.update(BuildInstanceHookEnv(
      name=self.op.instance_name,
      primary_node_name=self.op.pnode,
      secondary_node_names=self.cfg.GetNodeNames(self.secondaries),
      status=self.op.start,
      os_type=self.op.os_type,
      minmem=self.be_full[constants.BE_MINMEM],
      maxmem=self.be_full[constants.BE_MAXMEM],
      vcpus=self.be_full[constants.BE_VCPUS],
      nics=NICListToTuple(self, self.nics),
      disk_template=self.op.disk_template,
      disks=[(d[constants.IDISK_NAME], d.get("uuid", ""),
              d[constants.IDISK_SIZE], d[constants.IDISK_MODE])
             for d in self.disks],
      bep=self.be_full,
      hvp=self.hv_full,
      hypervisor_name=self.op.hypervisor,
      tags=self.op.tags,
      ))

    return env

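  # Instance-creation hooks run on the master node, the new primary node and
  # any secondary nodes, both in the pre and the post phase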
  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode(), self.op.pnode_uuid] + self.secondaries
    return nl, nl

  def _ReadExportInfo(self):
    """Reads the export information from disk.

    It will override the opcode source node and path with the actual
    information, if these two were not specified before.

    @return: the export information

    """
    assert self.op.mode == constants.INSTANCE_IMPORT

    if self.op.src_node_uuid is None:
      locked_nodes = self.owned_locks(locking.LEVEL_NODE)
      exp_list = self.rpc.call_export_list(locked_nodes)
      found = False
      for node in exp_list:
        if exp_list[node].fail_msg:
          continue
        if self.op.src_path in exp_list[node].payload:
          found = True
          self.op.src_node = node
          self.op.src_node_uuid = self.cfg.GetNodeInfoByName(node).uuid
          self.op.src_path = utils.PathJoin(pathutils.EXPORT_DIR,
                                            self.op.src_path)
          break
      if not found:
        raise errors.OpPrereqError("No export found for relative path %s" %
                                   self.op.src_path, errors.ECODE_INVAL)

    CheckNodeOnline(self, self.op.src_node_uuid)
    result = self.rpc.call_export_info(self.op.src_node_uuid, self.op.src_path)
    result.Raise("No export or invalid export found in dir %s" %
                 self.op.src_path)

    export_info = objects.SerializableConfigParser.Loads(str(result.payload))
    if not export_info.has_section(constants.INISECT_EXP):
      raise errors.ProgrammerError("Corrupted export config",
                                   errors.ECODE_ENVIRON)

    ei_version = export_info.get(constants.INISECT_EXP, "version")
    if int(ei_version) != constants.EXPORT_VERSION:
      raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                 (ei_version, constants.EXPORT_VERSION),
                                 errors.ECODE_ENVIRON)
    return export_info

  def _ReadExportParams(self, einfo):
    """Use export parameters as defaults.

    In case the opcode doesn't specify (as in override) some instance
    parameters, then try to use them from the export information, if
    that declares them.

    """
    self.op.os_type = einfo.get(constants.INISECT_EXP, "os")

    if not self.op.disks:
      disks = []
      # TODO: import the disk iv_name too
      for idx in range(constants.MAX_DISKS):
        if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
          disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
          disks.append({constants.IDISK_SIZE: disk_sz})
      self.op.disks = disks
      if not disks and self.op.disk_template != constants.DT_DISKLESS:
        raise errors.OpPrereqError("No disk info specified and the export"
                                   " is missing the disk information",
                                   errors.ECODE_INVAL)

    if not self.op.nics:
      nics = []
      for idx in range(constants.MAX_NICS):
        if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
          ndict = {}
          for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
            v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
            ndict[name] = v
          nics.append(ndict)
        else:
          break
      self.op.nics = nics

    if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
      self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()

    if (self.op.hypervisor is None and
        einfo.has_option(constants.INISECT_INS, "hypervisor")):
      self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")

    if einfo.has_section(constants.INISECT_HYP):
      # use the export parameters but do not override the ones
      # specified by the user
      for name, value in einfo.items(constants.INISECT_HYP):
        if name not in self.op.hvparams:
          self.op.hvparams[name] = value

    if einfo.has_section(constants.INISECT_BEP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_BEP):
        if name not in self.op.beparams:
          self.op.beparams[name] = value
        # Compatibility for the old "memory" be param
        if name == constants.BE_MEMORY:
          if constants.BE_MAXMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MAXMEM] = value
          if constants.BE_MINMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MINMEM] = value
    else:
      # try to read the parameters old style, from the main section
      for name in constants.BES_PARAMETERS:
        if (name not in self.op.beparams and
            einfo.has_option(constants.INISECT_INS, name)):
          self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)

    if einfo.has_section(constants.INISECT_OSP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_OSP):
        if name not in self.op.osparams:
          self.op.osparams[name] = value

  def _RevertToDefaults(self, cluster):
    """Revert the instance parameters to the default values.

    """
    # hvparams
    hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
    for name in self.op.hvparams.keys():
      if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
        del self.op.hvparams[name]
    # beparams
    be_defs = cluster.SimpleFillBE({})
    for name in self.op.beparams.keys():
      if name in be_defs and be_defs[name] == self.op.beparams[name]:
        del self.op.beparams[name]
    # nic params
    nic_defs = cluster.SimpleFillNIC({})
    for nic in self.op.nics:
      for name in constants.NICS_PARAMETERS:
        if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
          del nic[name]
    # osparams
    os_defs = cluster.SimpleFillOS(self.op.os_type, {})
    for name in self.op.osparams.keys():
      if name in os_defs and os_defs[name] == self.op.osparams[name]:
        del self.op.osparams[name]

  def _CalculateFileStorageDir(self):
    """Calculate final instance file storage dir.

    """
    # file storage dir calculation/check
    self.instance_file_storage_dir = None
    if self.op.disk_template in constants.DTS_FILEBASED:
      # build the full file storage dir path
      joinargs = []

      if self.op.disk_template == constants.DT_SHARED_FILE:
        get_fsd_fn = self.cfg.GetSharedFileStorageDir
      else:
        get_fsd_fn = self.cfg.GetFileStorageDir

      cfg_storagedir = get_fsd_fn()
      if not cfg_storagedir:
        raise errors.OpPrereqError("Cluster file storage dir not defined",
                                   errors.ECODE_STATE)
      joinargs.append(cfg_storagedir)

      if self.op.file_storage_dir is not None:
        joinargs.append(self.op.file_storage_dir)

      joinargs.append(self.op.instance_name)

      # pylint: disable=W0142
      self.instance_file_storage_dir = utils.PathJoin(*joinargs)

  def CheckPrereq(self): # pylint: disable=R0914
    """Check prerequisites.

    """
    self._CalculateFileStorageDir()

    if self.op.mode == constants.INSTANCE_IMPORT:
      export_info = self._ReadExportInfo()
      self._ReadExportParams(export_info)
      self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
    else:
      self._old_instance_name = None

    if (not self.cfg.GetVGName() and
        self.op.disk_template not in constants.DTS_NOT_LVM):
      raise errors.OpPrereqError("Cluster does not support lvm-based"
                                 " instances", errors.ECODE_STATE)

    if (self.op.hypervisor is None or
        self.op.hypervisor == constants.VALUE_AUTO):
      self.op.hypervisor = self.cfg.GetHypervisorType()

    cluster = self.cfg.GetClusterInfo()
    enabled_hvs = cluster.enabled_hypervisors
    if self.op.hypervisor not in enabled_hvs:
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
                                 " cluster (%s)" %
                                 (self.op.hypervisor, ",".join(enabled_hvs)),
                                 errors.ECODE_STATE)

    # Check tag validity
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)

    # check hypervisor parameter syntax (locally)
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
    filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
                                      self.op.hvparams)
    hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
    hv_type.CheckParameterSyntax(filled_hvp)
    self.hv_full = filled_hvp
    # check that we don't specify global parameters on an instance
    CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
                         "instance", "cluster")

    # fill and remember the beparams dict
    self.be_full = _ComputeFullBeParams(self.op, cluster)

    # build os parameters
    self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)

    # now that hvp/bep are in final format, let's reset to defaults,
    # if told to do so
    if self.op.identify_defaults:
      self._RevertToDefaults(cluster)

    # NIC buildup
    self.nics = _ComputeNics(self.op, cluster, self.check_ip, self.cfg,
                             self.proc.GetECId())

    # disk checks/pre-build
    default_vg = self.cfg.GetVGName()
    self.disks = ComputeDisks(self.op, default_vg)

    if self.op.mode == constants.INSTANCE_IMPORT:
      disk_images = []
      for idx in range(len(self.disks)):
        option = "disk%d_dump" % idx
        if export_info.has_option(constants.INISECT_INS, option):
          # FIXME: are the old os-es, disk sizes, etc. useful?
          export_name = export_info.get(constants.INISECT_INS, option)
          image = utils.PathJoin(self.op.src_path, export_name)
          disk_images.append(image)
        else:
          disk_images.append(False)

      self.src_images = disk_images

      if self.op.instance_name == self._old_instance_name:
        for idx, nic in enumerate(self.nics):
          if nic.mac == constants.VALUE_AUTO:
            nic_mac_ini = "nic%d_mac" % idx
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)

    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT

    # ip ping checks (we use the same ip that was resolved in ExpandNames)
    if self.op.ip_check:
      if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (self.check_ip, self.op.instance_name),
                                   errors.ECODE_NOTUNIQUE)

    #### mac address generation
    # By generating here the mac address both the allocator and the hooks get
    # the real final mac address rather than the 'auto' or 'generate' value.
    # There is a race condition between the generation and the instance object
    # creation, which means that we know the mac is valid now, but we're not
    # sure it will be when we actually add the instance. If things go bad
    # adding the instance will abort because of a duplicate mac, and the
    # creation job will fail.
    for nic in self.nics:
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId())

    #### allocator run

    if self.op.iallocator is not None:
      self._RunAllocator()

    # Release all unneeded node locks
    keep_locks = filter(None, [self.op.pnode_uuid, self.op.snode_uuid,
                               self.op.src_node_uuid])
    ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES)), \
      "Node locks differ from node resource locks"

    #### node related checks

    # check primary node
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode_uuid)
    assert self.pnode is not None, \
      "Cannot retrieve locked node %s" % self.op.pnode_uuid
    if pnode.offline:
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if pnode.drained:
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if not pnode.vm_capable:
      raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
                                 " '%s'" % pnode.name, errors.ECODE_STATE)

    self.secondaries = []

    # Fill in any IPs from IP pools. This must happen here, because we need to
    # know the nic's primary node, as specified by the iallocator
    for idx, nic in enumerate(self.nics):
      net_uuid = nic.network
      if net_uuid is not None:
        nobj = self.cfg.GetNetwork(net_uuid)
        netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.uuid)
        if netparams is None:
          raise errors.OpPrereqError("No netparams found for network"
                                     " %s. Probably not connected to"
                                     " node's %s nodegroup" %
                                     (nobj.name, self.pnode.name),
                                     errors.ECODE_INVAL)
        self.LogInfo("NIC/%d inherits netparams %s" %
                     (idx, netparams.values()))
        nic.nicparams = dict(netparams)
        if nic.ip is not None:
          if nic.ip.lower() == constants.NIC_IP_POOL:
            try:
              nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
                                         " from the address pool" % idx,
                                         errors.ECODE_STATE)
            self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name)
          else:
            try:
              self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("IP address %s already in use"
                                         " or does not belong to network %s" %
                                         (nic.ip, nobj.name),
                                         errors.ECODE_NOTUNIQUE)

      # net is None, ip None or given
      elif self.op.conflicts_check:
        _CheckForConflictingIp(self, nic.ip, self.pnode.uuid)

    # mirror node verification
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.snode_uuid == pnode.uuid:
        raise errors.OpPrereqError("The secondary node cannot be the"
                                   " primary node", errors.ECODE_INVAL)
      CheckNodeOnline(self, self.op.snode_uuid)
      CheckNodeNotDrained(self, self.op.snode_uuid)
      CheckNodeVmCapable(self, self.op.snode_uuid)
      self.secondaries.append(self.op.snode_uuid)

      snode = self.cfg.GetNodeInfo(self.op.snode_uuid)
      if pnode.group != snode.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

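    # Exclusive storage is in effect if any node that will hold one of this
    # instance's disks has it enabled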
    nodes = [pnode]
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      nodes.append(snode)
    has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
    excl_stor = compat.any(map(has_es, nodes))
    if excl_stor and not self.op.disk_template in constants.DTS_EXCL_STORAGE:
      raise errors.OpPrereqError("Disk template %s not supported with"
                                 " exclusive storage" % self.op.disk_template,
                                 errors.ECODE_STATE)
    for disk in self.disks:
      CheckSpindlesExclusiveStorage(disk, excl_stor, True)

    node_uuids = [pnode.uuid] + self.secondaries

    if not self.adopt_disks:
      if self.op.disk_template == constants.DT_RBD:
        # _CheckRADOSFreeSpace() is just a placeholder.
        # Any function that checks prerequisites can be placed here.
        # Check if there is enough space on the RADOS cluster.
        CheckRADOSFreeSpace()
      elif self.op.disk_template == constants.DT_EXT:
        # FIXME: Function that checks prereqs if needed
        pass
      elif self.op.disk_template in utils.GetLvmDiskTemplates():
        # Check lv size requirements, if not adopting
        req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
        CheckNodesFreeDiskPerVG(self, node_uuids, req_sizes)
      else:
        # FIXME: add checks for other, non-adopting, non-lvm disk templates
        pass

    elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
      all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
                                disk[constants.IDISK_ADOPT])
                     for disk in self.disks])
      if len(all_lvs) != len(self.disks):
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
                                   errors.ECODE_INVAL)
      for lv_name in all_lvs:
        try:
          # FIXME: lv_name here is "vg/lv" need to ensure that other calls
          # to ReserveLV uses the same syntax
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("LV named %s used by another instance" %
                                     lv_name, errors.ECODE_NOTUNIQUE)

      vg_names = self.rpc.call_vg_list([pnode.uuid])[pnode.uuid]
      vg_names.Raise("Cannot get VG information from node %s" % pnode.name)

      node_lvs = self.rpc.call_lv_list([pnode.uuid],
                                       vg_names.payload.keys())[pnode.uuid]
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
      node_lvs = node_lvs.payload

      delta = all_lvs.difference(node_lvs.keys())
      if delta:
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
      if online_lvs:
        raise errors.OpPrereqError("Online logical volumes found, cannot"
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
                                   errors.ECODE_STATE)
      # update the size of disk based on what is found
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
                                        dsk[constants.IDISK_ADOPT])][0]))

    elif self.op.disk_template == constants.DT_BLOCK:
      # Normalize and de-duplicate device paths
      all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
                       for disk in self.disks])
      if len(all_disks) != len(self.disks):
        raise errors.OpPrereqError("Duplicate disk names given for adoption",
                                   errors.ECODE_INVAL)
      baddisks = [d for d in all_disks
                  if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
      if baddisks:
        raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
                                   " cannot be adopted" %
                                   (utils.CommaJoin(baddisks),
                                    constants.ADOPTABLE_BLOCKDEV_ROOT),
                                   errors.ECODE_INVAL)

      node_disks = self.rpc.call_bdev_sizes([pnode.uuid],
                                            list(all_disks))[pnode.uuid]
      node_disks.Raise("Cannot get block device information from node %s" %
                       pnode.name)
      node_disks = node_disks.payload
      delta = all_disks.difference(node_disks.keys())
      if delta:
        raise errors.OpPrereqError("Missing block device(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_disks[dsk[constants.IDISK_ADOPT]]))

    # Verify instance specs
    spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
    ispec = {
      constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
      constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
      constants.ISPEC_DISK_COUNT: len(self.disks),
      constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE]
                                  for disk in self.disks],
      constants.ISPEC_NIC_COUNT: len(self.nics),
      constants.ISPEC_SPINDLE_USE: spindle_use,
      }

    group_info = self.cfg.GetNodeGroup(pnode.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
    res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
                                               self.op.disk_template)
    if not self.op.ignore_ipolicy and res:
      msg = ("Instance allocation to group %s (%s) violates policy: %s" %
             (pnode.group, group_info.name, utils.CommaJoin(res)))
      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

    CheckHVParams(self, node_uuids, self.op.hypervisor, self.op.hvparams)

    CheckNodeHasOS(self, pnode.uuid, self.op.os_type, self.op.force_variant)
    # check OS parameters (remotely)
    CheckOSParams(self, True, node_uuids, self.op.os_type, self.os_full)

    CheckNicsBridgesExist(self, self.nics, self.pnode.uuid)

    #TODO: _CheckExtParams (remotely)
    # Check parameters for extstorage

    # memory check on primary node
    #TODO(dynmem): use MINMEM for checking
    if self.op.start:
      hvfull = objects.FillDict(cluster.hvparams.get(self.op.hypervisor, {}),
                                self.op.hvparams)
      CheckNodeFreeMemory(self, self.pnode.uuid,
                          "creating instance %s" % self.op.instance_name,
                          self.be_full[constants.BE_MAXMEM],
                          self.op.hypervisor, hvfull)

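    # For dry-run invocations this is the opcode result: the nodes the
    # instance would be placed on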
    self.dry_run_result = list(node_uuids)

  def Exec(self, feedback_fn):
    """Create and add the instance to the cluster.

    """
    assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
                self.owned_locks(locking.LEVEL_NODE)), \
      "Node locks differ from node resource locks"
    assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)

    ht_kind = self.op.hypervisor
    if ht_kind in constants.HTS_REQ_PORT:
      network_port = self.cfg.AllocatePort()
    else:
      network_port = None

    instance_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())

    # This is ugly but we got a chicken-egg problem here
    # We can only take the group disk parameters, as the instance
    # has no disks yet (we are generating them right here).
    nodegroup = self.cfg.GetNodeGroup(self.pnode.group)
    disks = GenerateDiskTemplate(self,
                                 self.op.disk_template,
                                 instance_uuid, self.pnode.uuid,
                                 self.secondaries,
                                 self.disks,
                                 self.instance_file_storage_dir,
                                 self.op.file_driver,
                                 0,
                                 feedback_fn,
                                 self.cfg.GetGroupDiskParams(nodegroup))

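    # The instance object starts out administratively down and with inactive
    # disks; it is only started later if requested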
    iobj = objects.Instance(name=self.op.instance_name,
                            uuid=instance_uuid,
                            os=self.op.os_type,
                            primary_node=self.pnode.uuid,
                            nics=self.nics, disks=disks,
                            disk_template=self.op.disk_template,
                            disks_active=False,
                            admin_state=constants.ADMINST_DOWN,
                            network_port=network_port,
                            beparams=self.op.beparams,
                            hvparams=self.op.hvparams,
                            hypervisor=self.op.hypervisor,
                            osparams=self.op.osparams,
                            )

    if self.op.tags:
      for tag in self.op.tags:
        iobj.AddTag(tag)

    if self.adopt_disks:
      if self.op.disk_template == constants.DT_PLAIN:
        # rename LVs to the newly-generated names; we need to construct
        # 'fake' LV disks with the old data, plus the new unique_id
        tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
        rename_to = []
        for t_dsk, a_dsk in zip(tmp_disks, self.disks):
          rename_to.append(t_dsk.logical_id)
          t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
          self.cfg.SetDiskID(t_dsk, self.pnode.uuid)
        result = self.rpc.call_blockdev_rename(self.pnode.uuid,
                                               zip(tmp_disks, rename_to))
        result.Raise("Failed to rename adopted LVs")
    else:
      feedback_fn("* creating instance disks...")
      try:
        CreateDisks(self, iobj)
      except errors.OpExecError:
        self.LogWarning("Device creation failed")
        self.cfg.ReleaseDRBDMinors(self.op.instance_name)
        raise

    feedback_fn("adding instance %s to cluster config" % self.op.instance_name)

    self.cfg.AddInstance(iobj, self.proc.GetECId())

    # Declare that we don't want to remove the instance lock anymore, as we've
    # added the instance to the config
    del self.remove_locks[locking.LEVEL_INSTANCE]

    if self.op.mode == constants.INSTANCE_IMPORT:
      # Release unused nodes
      ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node_uuid])
    else:
      # Release all nodes
      ReleaseLocks(self, locking.LEVEL_NODE)

    disk_abort = False
    if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
      feedback_fn("* wiping instance disks...")
      try:
        WipeDisks(self, iobj)
      except errors.OpExecError, err:
        logging.exception("Wiping disks failed")
        self.LogWarning("Wiping instance disks failed (%s)", err)
        disk_abort = True

    if disk_abort:
      # Something is already wrong with the disks, don't do anything else
      pass
    elif self.op.wait_for_sync:
      disk_abort = not WaitForSync(self, iobj)
    elif iobj.disk_template in constants.DTS_INT_MIRROR:
      # make sure the disks are not degraded (still sync-ing is ok)
      feedback_fn("* checking mirrors status")
      disk_abort = not WaitForSync(self, iobj, oneshot=True)
    else:
      disk_abort = False

    if disk_abort:
      RemoveDisks(self, iobj)
      self.cfg.RemoveInstance(iobj.uuid)
      # Make sure the instance lock gets removed
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance")

    # instance disks are now active
    iobj.disks_active = True

    # Release all node resource locks
    ReleaseLocks(self, locking.LEVEL_NODE_RES)

    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
      # we need to set the disks ID to the primary node, since the
      # preceding code might or might have not done it, depending on
      # disk template and other options
      for disk in iobj.disks:
        self.cfg.SetDiskID(disk, self.pnode.uuid)
      if self.op.mode == constants.INSTANCE_CREATE:
        if not self.op.no_install:
          pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
                        not self.op.wait_for_sync)
          if pause_sync:
            feedback_fn("* pausing disk sync to install instance OS")
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
                                                              (iobj.disks,
                                                               iobj), True)
            for idx, success in enumerate(result.payload):
              if not success:
                logging.warn("pause-sync of instance %s for disk %d failed",
                             self.op.instance_name, idx)

          feedback_fn("* running the instance OS create scripts...")
          # FIXME: pass debug option from opcode to backend
          os_add_result = \
            self.rpc.call_instance_os_add(self.pnode.uuid, (iobj, None), False,
                                          self.op.debug_level)
          if pause_sync:
            feedback_fn("* resuming disk sync")
1323
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
1324
                                                              (iobj.disks,
1325
                                                               iobj), False)
1326
            for idx, success in enumerate(result.payload):
1327
              if not success:
1328
                logging.warn("resume-sync of instance %s for disk %d failed",
1329
                             self.op.instance_name, idx)
1330

    
1331
          os_add_result.Raise("Could not add os for instance %s"
1332
                              " on node %s" % (self.op.instance_name,
1333
                                               self.pnode.name))
1334

    
1335
      else:
1336
        if self.op.mode == constants.INSTANCE_IMPORT:
1337
          feedback_fn("* running the instance OS import scripts...")
1338

    
1339
          transfers = []
1340

    
1341
          for idx, image in enumerate(self.src_images):
1342
            if not image:
1343
              continue
1344

    
1345
            # FIXME: pass debug option from opcode to backend
1346
            dt = masterd.instance.DiskTransfer("disk/%s" % idx,
1347
                                               constants.IEIO_FILE, (image, ),
1348
                                               constants.IEIO_SCRIPT,
1349
                                               (iobj.disks[idx], idx),
1350
                                               None)
1351
            transfers.append(dt)
1352

    
1353
          import_result = \
1354
            masterd.instance.TransferInstanceData(self, feedback_fn,
1355
                                                  self.op.src_node_uuid,
1356
                                                  self.pnode.uuid,
1357
                                                  self.pnode.secondary_ip,
1358
                                                  iobj, transfers)
1359
          if not compat.all(import_result):
1360
            self.LogWarning("Some disks for instance %s on node %s were not"
1361
                            " imported successfully" % (self.op.instance_name,
1362
                                                        self.pnode.name))
1363

    
1364
          rename_from = self._old_instance_name
1365

    
1366
        elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
1367
          feedback_fn("* preparing remote import...")
1368
          # The source cluster will stop the instance before attempting to make
1369
          # a connection. In some cases stopping an instance can take a long
1370
          # time, hence the shutdown timeout is added to the connection
1371
          # timeout.
1372
          connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
1373
                             self.op.source_shutdown_timeout)
1374
          timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
1375

    
1376
          assert iobj.primary_node == self.pnode.uuid
1377
          disk_results = \
1378
            masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
1379
                                          self.source_x509_ca,
1380
                                          self._cds, timeouts)
1381
          if not compat.all(disk_results):
1382
            # TODO: Should the instance still be started, even if some disks
1383
            # failed to import (valid for local imports, too)?
1384
            self.LogWarning("Some disks for instance %s on node %s were not"
1385
                            " imported successfully" % (self.op.instance_name,
1386
                                                        self.pnode.name))
1387

    
1388
          rename_from = self.source_instance_name
1389

    
1390
        else:
1391
          # also checked in the prereq part
1392
          raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
1393
                                       % self.op.mode)
1394

    
1395
        # Run rename script on newly imported instance
1396
        assert iobj.name == self.op.instance_name
1397
        feedback_fn("Running rename script for %s" % self.op.instance_name)
1398
        result = self.rpc.call_instance_run_rename(self.pnode.uuid, iobj,
1399
                                                   rename_from,
1400
                                                   self.op.debug_level)
1401
        result.Warn("Failed to run rename script for %s on node %s" %
1402
                    (self.op.instance_name, self.pnode.name), self.LogWarning)
1403

    
1404
    assert not self.owned_locks(locking.LEVEL_NODE_RES)
1405

    
1406
    if self.op.start:
1407
      iobj.admin_state = constants.ADMINST_UP
1408
      self.cfg.Update(iobj, feedback_fn)
1409
      logging.info("Starting instance %s on node %s", self.op.instance_name,
1410
                   self.pnode.name)
1411
      feedback_fn("* starting instance...")
1412
      result = self.rpc.call_instance_start(self.pnode.uuid, (iobj, None, None),
1413
                                            False, self.op.reason)
1414
      result.Raise("Could not start instance")
1415

    
1416
    return list(iobj.all_nodes)
1417

    
1418

    
1419
class LUInstanceRename(LogicalUnit):
1420
  """Rename an instance.
1421

1422
  """
1423
  HPATH = "instance-rename"
1424
  HTYPE = constants.HTYPE_INSTANCE
1425

    
1426
  def CheckArguments(self):
1427
    """Check arguments.
1428

1429
    """
1430
    if self.op.ip_check and not self.op.name_check:
1431
      # TODO: make the ip check more flexible and not depend on the name check
1432
      raise errors.OpPrereqError("IP address check requires a name check",
1433
                                 errors.ECODE_INVAL)
1434

    
1435
  def BuildHooksEnv(self):
1436
    """Build hooks env.
1437

1438
    This runs on master, primary and secondary nodes of the instance.
1439

1440
    """
1441
    env = BuildInstanceHookEnvByObject(self, self.instance)
1442
    env["INSTANCE_NEW_NAME"] = self.op.new_name
1443
    return env
1444

    
1445
  def BuildHooksNodes(self):
1446
    """Build hooks nodes.
1447

1448
    """
1449
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
1450
    return (nl, nl)
1451

    
1452
  def CheckPrereq(self):
1453
    """Check prerequisites.
1454

1455
    This checks that the instance is in the cluster and is not running.
1456

1457
    """
1458
    (self.op.instance_uuid, self.op.instance_name) = \
1459
      ExpandInstanceUuidAndName(self.cfg, self.op.instance_uuid,
1460
                                self.op.instance_name)
1461
    instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1462
    assert instance is not None
1463

    
1464
    # It should actually not happen that an instance is running with a disabled
1465
    # disk template, but in case it does, the renaming of file-based instances
1466
    # will fail horribly. Thus, we test it before.
1467
    if (instance.disk_template in constants.DTS_FILEBASED and
1468
        self.op.new_name != instance.name):
1469
      CheckDiskTemplateEnabled(self.cfg.GetClusterInfo(),
1470
                               instance.disk_template)
1471

    
1472
    CheckNodeOnline(self, instance.primary_node)
1473
    CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
1474
                       msg="cannot rename")
1475
    self.instance = instance
1476

    
1477
    new_name = self.op.new_name
1478
    if self.op.name_check:
1479
      hostname = _CheckHostnameSane(self, new_name)
1480
      new_name = self.op.new_name = hostname.name
1481
      if (self.op.ip_check and
1482
          netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
1483
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
1484
                                   (hostname.ip, new_name),
1485
                                   errors.ECODE_NOTUNIQUE)
1486

    
1487
    instance_names = [inst.name for
1488
                      inst in self.cfg.GetAllInstancesInfo().values()]
1489
    if new_name in instance_names and new_name != instance.name:
1490
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
1491
                                 new_name, errors.ECODE_EXISTS)
1492

    
1493
  def Exec(self, feedback_fn):
1494
    """Rename the instance.
1495

1496
    """
1497
    old_name = self.instance.name
1498

    
1499
    rename_file_storage = False
1500
    if (self.instance.disk_template in constants.DTS_FILEBASED and
1501
        self.op.new_name != self.instance.name):
1502
      old_file_storage_dir = os.path.dirname(
1503
                               self.instance.disks[0].logical_id[1])
1504
      rename_file_storage = True
1505

    
1506
    self.cfg.RenameInstance(self.instance.uuid, self.op.new_name)
1507
    # Change the instance lock. This is definitely safe while we hold the BGL.
1508
    # Otherwise the new lock would have to be added in acquired mode.
1509
    assert self.REQ_BGL
1510
    assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER)
1511
    self.glm.remove(locking.LEVEL_INSTANCE, old_name)
1512
    self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
1513

    
1514
    # re-read the instance from the configuration after rename
1515
    renamed_inst = self.cfg.GetInstanceInfo(self.instance.uuid)
1516

    
1517
    if rename_file_storage:
1518
      new_file_storage_dir = os.path.dirname(
1519
                               renamed_inst.disks[0].logical_id[1])
1520
      result = self.rpc.call_file_storage_dir_rename(renamed_inst.primary_node,
1521
                                                     old_file_storage_dir,
1522
                                                     new_file_storage_dir)
1523
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
1524
                   " (but the instance has been renamed in Ganeti)" %
1525
                   (self.cfg.GetNodeName(renamed_inst.primary_node),
1526
                    old_file_storage_dir, new_file_storage_dir))
1527

    
1528
    StartInstanceDisks(self, renamed_inst, None)
1529
    # update info on disks
1530
    info = GetInstanceInfoText(renamed_inst)
1531
    for (idx, disk) in enumerate(renamed_inst.disks):
1532
      for node_uuid in renamed_inst.all_nodes:
1533
        self.cfg.SetDiskID(disk, node_uuid)
1534
        result = self.rpc.call_blockdev_setinfo(node_uuid, disk, info)
1535
        result.Warn("Error setting info on node %s for disk %s" %
1536
                    (self.cfg.GetNodeName(node_uuid), idx), self.LogWarning)
1537
    try:
1538
      result = self.rpc.call_instance_run_rename(renamed_inst.primary_node,
1539
                                                 renamed_inst, old_name,
1540
                                                 self.op.debug_level)
1541
      result.Warn("Could not run OS rename script for instance %s on node %s"
1542
                  " (but the instance has been renamed in Ganeti)" %
1543
                  (renamed_inst.name,
1544
                   self.cfg.GetNodeName(renamed_inst.primary_node)),
1545
                  self.LogWarning)
1546
    finally:
1547
      ShutdownInstanceDisks(self, renamed_inst)
1548

    
1549
    return renamed_inst.name
1550

    
1551

    
1552
class LUInstanceRemove(LogicalUnit):
1553
  """Remove an instance.
1554

1555
  """
1556
  HPATH = "instance-remove"
1557
  HTYPE = constants.HTYPE_INSTANCE
1558
  REQ_BGL = False
1559

    
1560
  def ExpandNames(self):
1561
    self._ExpandAndLockInstance()
1562
    self.needed_locks[locking.LEVEL_NODE] = []
1563
    self.needed_locks[locking.LEVEL_NODE_RES] = []
1564
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
1565

    
1566
  def DeclareLocks(self, level):
1567
    if level == locking.LEVEL_NODE:
1568
      self._LockInstancesNodes()
1569
    elif level == locking.LEVEL_NODE_RES:
1570
      # Copy node locks
1571
      self.needed_locks[locking.LEVEL_NODE_RES] = \
1572
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1573

    
1574
  def BuildHooksEnv(self):
1575
    """Build hooks env.
1576

1577
    This runs on master, primary and secondary nodes of the instance.
1578

1579
    """
1580
    env = BuildInstanceHookEnvByObject(self, self.instance)
1581
    env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
1582
    return env
1583

    
1584
  def BuildHooksNodes(self):
1585
    """Build hooks nodes.
1586

1587
    """
1588
    nl = [self.cfg.GetMasterNode()]
1589
    nl_post = list(self.instance.all_nodes) + nl
1590
    return (nl, nl_post)
1591

    
1592
  def CheckPrereq(self):
1593
    """Check prerequisites.
1594

1595
    This checks that the instance is in the cluster.
1596

1597
    """
1598
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1599
    assert self.instance is not None, \
1600
      "Cannot retrieve locked instance %s" % self.op.instance_name
1601

    
1602
  def Exec(self, feedback_fn):
1603
    """Remove the instance.
1604

1605
    """
1606
    logging.info("Shutting down instance %s on node %s", self.instance.name,
1607
                 self.cfg.GetNodeName(self.instance.primary_node))
1608

    
1609
    result = self.rpc.call_instance_shutdown(self.instance.primary_node,
1610
                                             self.instance,
1611
                                             self.op.shutdown_timeout,
1612
                                             self.op.reason)
1613
    if self.op.ignore_failures:
1614
      result.Warn("Warning: can't shutdown instance", feedback_fn)
1615
    else:
1616
      result.Raise("Could not shutdown instance %s on node %s" %
1617
                   (self.instance.name,
1618
                    self.cfg.GetNodeName(self.instance.primary_node)))
1619

    
1620
    assert (self.owned_locks(locking.LEVEL_NODE) ==
1621
            self.owned_locks(locking.LEVEL_NODE_RES))
1622
    assert not (set(self.instance.all_nodes) -
1623
                self.owned_locks(locking.LEVEL_NODE)), \
1624
      "Not owning correct locks"
1625

    
1626
    RemoveInstance(self, feedback_fn, self.instance, self.op.ignore_failures)
1627

    
1628

    
1629
class LUInstanceMove(LogicalUnit):
1630
  """Move an instance by data-copying.
1631

1632
  """
1633
  HPATH = "instance-move"
1634
  HTYPE = constants.HTYPE_INSTANCE
1635
  REQ_BGL = False
1636

    
1637
  def ExpandNames(self):
1638
    self._ExpandAndLockInstance()
1639
    (self.op.target_node_uuid, self.op.target_node) = \
1640
      ExpandNodeUuidAndName(self.cfg, self.op.target_node_uuid,
1641
                            self.op.target_node)
1642
    self.needed_locks[locking.LEVEL_NODE] = [self.op.target_node]
1643
    self.needed_locks[locking.LEVEL_NODE_RES] = []
1644
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
1645

    
1646
  def DeclareLocks(self, level):
1647
    if level == locking.LEVEL_NODE:
1648
      self._LockInstancesNodes(primary_only=True)
1649
    elif level == locking.LEVEL_NODE_RES:
1650
      # Copy node locks
1651
      self.needed_locks[locking.LEVEL_NODE_RES] = \
1652
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1653

    
1654
  def BuildHooksEnv(self):
1655
    """Build hooks env.
1656

1657
    This runs on master, primary and secondary nodes of the instance.
1658

1659
    """
1660
    env = {
1661
      "TARGET_NODE": self.op.target_node,
1662
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
1663
      }
1664
    env.update(BuildInstanceHookEnvByObject(self, self.instance))
1665
    return env
1666

    
1667
  def BuildHooksNodes(self):
1668
    """Build hooks nodes.
1669

1670
    """
1671
    nl = [
1672
      self.cfg.GetMasterNode(),
1673
      self.instance.primary_node,
1674
      self.op.target_node_uuid,
1675
      ]
1676
    return (nl, nl)
1677

    
1678
  def CheckPrereq(self):
1679
    """Check prerequisites.
1680

1681
    This checks that the instance is in the cluster.
1682

1683
    """
1684
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1685
    assert self.instance is not None, \
1686
      "Cannot retrieve locked instance %s" % self.op.instance_name
1687

    
1688
    if self.instance.disk_template not in constants.DTS_COPYABLE:
1689
      raise errors.OpPrereqError("Disk template %s not suitable for copying" %
1690
                                 self.instance.disk_template,
1691
                                 errors.ECODE_STATE)
1692

    
1693
    target_node = self.cfg.GetNodeInfo(self.op.target_node_uuid)
1694
    assert target_node is not None, \
1695
      "Cannot retrieve locked node %s" % self.op.target_node
1696

    
1697
    self.target_node_uuid = target_node.uuid
1698
    if target_node.uuid == self.instance.primary_node:
1699
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
1700
                                 (self.instance.name, target_node.name),
1701
                                 errors.ECODE_STATE)
1702

    
1703
    bep = self.cfg.GetClusterInfo().FillBE(self.instance)
1704

    
1705
    for idx, dsk in enumerate(self.instance.disks):
1706
      if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
1707
        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
1708
                                   " cannot copy" % idx, errors.ECODE_STATE)
1709

    
1710
    CheckNodeOnline(self, target_node.uuid)
1711
    CheckNodeNotDrained(self, target_node.uuid)
1712
    CheckNodeVmCapable(self, target_node.uuid)
1713
    cluster = self.cfg.GetClusterInfo()
1714
    group_info = self.cfg.GetNodeGroup(target_node.group)
1715
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
1716
    CheckTargetNodeIPolicy(self, ipolicy, self.instance, target_node, self.cfg,
1717
                           ignore=self.op.ignore_ipolicy)
1718

    
1719
    if self.instance.admin_state == constants.ADMINST_UP:
1720
      # check memory requirements on the secondary node
1721
      CheckNodeFreeMemory(
1722
          self, target_node.uuid, "failing over instance %s" %
1723
          self.instance.name, bep[constants.BE_MAXMEM],
1724
          self.instance.hypervisor,
1725
          self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])
1726
    else:
1727
      self.LogInfo("Not checking memory on the secondary node as"
1728
                   " instance will not be started")
1729

    
1730
    # check bridge existance
1731
    CheckInstanceBridgesExist(self, self.instance, node_uuid=target_node.uuid)
1732

    
1733
  def Exec(self, feedback_fn):
1734
    """Move an instance.
1735

1736
    The move is done by shutting it down on its present node, copying
1737
    the data over (slow) and starting it on the new node.
1738

1739
    """
1740
    source_node = self.cfg.GetNodeInfo(self.instance.primary_node)
1741
    target_node = self.cfg.GetNodeInfo(self.target_node_uuid)
1742

    
1743
    self.LogInfo("Shutting down instance %s on source node %s",
1744
                 self.instance.name, source_node.name)
1745

    
1746
    assert (self.owned_locks(locking.LEVEL_NODE) ==
1747
            self.owned_locks(locking.LEVEL_NODE_RES))
1748

    
1749
    result = self.rpc.call_instance_shutdown(source_node.uuid, self.instance,
1750
                                             self.op.shutdown_timeout,
1751
                                             self.op.reason)
1752
    if self.op.ignore_consistency:
1753
      result.Warn("Could not shutdown instance %s on node %s. Proceeding"
1754
                  " anyway. Please make sure node %s is down. Error details" %
1755
                  (self.instance.name, source_node.name, source_node.name),
1756
                  self.LogWarning)
1757
    else:
1758
      result.Raise("Could not shutdown instance %s on node %s" %
1759
                   (self.instance.name, source_node.name))
1760

    
1761
    # create the target disks
1762
    try:
1763
      CreateDisks(self, self.instance, target_node_uuid=target_node.uuid)
1764
    except errors.OpExecError:
1765
      self.LogWarning("Device creation failed")
1766
      self.cfg.ReleaseDRBDMinors(self.instance.uuid)
1767
      raise
1768

    
1769
    cluster_name = self.cfg.GetClusterInfo().cluster_name
1770

    
1771
    errs = []
1772
    # activate, get path, copy the data over
1773
    for idx, disk in enumerate(self.instance.disks):
1774
      self.LogInfo("Copying data for disk %d", idx)
1775
      result = self.rpc.call_blockdev_assemble(
1776
                 target_node.uuid, (disk, self.instance), self.instance.name,
1777
                 True, idx)
1778
      if result.fail_msg:
1779
        self.LogWarning("Can't assemble newly created disk %d: %s",
1780
                        idx, result.fail_msg)
1781
        errs.append(result.fail_msg)
1782
        break
1783
      dev_path = result.payload
1784
      result = self.rpc.call_blockdev_export(source_node.uuid, (disk,
1785
                                                                self.instance),
1786
                                             target_node.name, dev_path,
1787
                                             cluster_name)
1788
      if result.fail_msg:
1789
        self.LogWarning("Can't copy data over for disk %d: %s",
1790
                        idx, result.fail_msg)
1791
        errs.append(result.fail_msg)
1792
        break
1793

    
1794
    if errs:
1795
      self.LogWarning("Some disks failed to copy, aborting")
1796
      try:
1797
        RemoveDisks(self, self.instance, target_node_uuid=target_node.uuid)
1798
      finally:
1799
        self.cfg.ReleaseDRBDMinors(self.instance.uuid)
1800
        raise errors.OpExecError("Errors during disk copy: %s" %
1801
                                 (",".join(errs),))
1802

    
1803
    self.instance.primary_node = target_node.uuid
1804
    self.cfg.Update(self.instance, feedback_fn)
1805

    
1806
    self.LogInfo("Removing the disks on the original node")
1807
    RemoveDisks(self, self.instance, target_node_uuid=source_node.uuid)
1808

    
1809
    # Only start the instance if it's marked as up
1810
    if self.instance.admin_state == constants.ADMINST_UP:
1811
      self.LogInfo("Starting instance %s on node %s",
1812
                   self.instance.name, target_node.name)
1813

    
1814
      disks_ok, _ = AssembleInstanceDisks(self, self.instance,
1815
                                          ignore_secondaries=True)
1816
      if not disks_ok:
1817
        ShutdownInstanceDisks(self, self.instance)
1818
        raise errors.OpExecError("Can't activate the instance's disks")
1819

    
1820
      result = self.rpc.call_instance_start(target_node.uuid,
1821
                                            (self.instance, None, None), False,
1822
                                            self.op.reason)
1823
      msg = result.fail_msg
1824
      if msg:
1825
        ShutdownInstanceDisks(self, self.instance)
1826
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
1827
                                 (self.instance.name, target_node.name, msg))
1828

    
1829

    
1830
class LUInstanceMultiAlloc(NoHooksLU):
1831
  """Allocates multiple instances at the same time.
1832

1833
  """
1834
  REQ_BGL = False
1835

    
1836
  def CheckArguments(self):
1837
    """Check arguments.
1838

1839
    """
1840
    nodes = []
1841
    for inst in self.op.instances:
1842
      if inst.iallocator is not None:
1843
        raise errors.OpPrereqError("iallocator are not allowed to be set on"
1844
                                   " instance objects", errors.ECODE_INVAL)
1845
      nodes.append(bool(inst.pnode))
1846
      if inst.disk_template in constants.DTS_INT_MIRROR:
1847
        nodes.append(bool(inst.snode))
1848

    
1849
    has_nodes = compat.any(nodes)
1850
    if compat.all(nodes) ^ has_nodes:
1851
      raise errors.OpPrereqError("There are instance objects providing"
1852
                                 " pnode/snode while others do not",
1853
                                 errors.ECODE_INVAL)
1854

    
1855
    if self.op.iallocator is None:
1856
      default_iallocator = self.cfg.GetDefaultIAllocator()
1857
      if default_iallocator and has_nodes:
1858
        self.op.iallocator = default_iallocator
1859
      else:
1860
        raise errors.OpPrereqError("No iallocator or nodes on the instances"
1861
                                   " given and no cluster-wide default"
1862
                                   " iallocator found; please specify either"
1863
                                   " an iallocator or nodes on the instances"
1864
                                   " or set a cluster-wide default iallocator",
1865
                                   errors.ECODE_INVAL)
1866

    
1867
    _CheckOpportunisticLocking(self.op)
1868

    
1869
    dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
1870
    if dups:
1871
      raise errors.OpPrereqError("There are duplicate instance names: %s" %
1872
                                 utils.CommaJoin(dups), errors.ECODE_INVAL)
1873

    
1874
  def ExpandNames(self):
1875
    """Calculate the locks.
1876

1877
    """
1878
    self.share_locks = ShareAll()
1879
    self.needed_locks = {
1880
      # iallocator will select nodes and even if no iallocator is used,
1881
      # collisions with LUInstanceCreate should be avoided
1882
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
1883
      }
1884

    
1885
    if self.op.iallocator:
1886
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
1887
      self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET
1888

    
1889
      if self.op.opportunistic_locking:
1890
        self.opportunistic_locks[locking.LEVEL_NODE] = True
1891
        self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
1892
    else:
1893
      nodeslist = []
1894
      for inst in self.op.instances:
1895
        (inst.pnode_uuid, inst.pnode) = \
1896
          ExpandNodeUuidAndName(self.cfg, inst.pnode_uuid, inst.pnode)
1897
        nodeslist.append(inst.pnode)
1898
        if inst.snode is not None:
1899
          (inst.snode_uuid, inst.snode) = \
1900
            ExpandNodeUuidAndName(self.cfg, inst.snode_uuid, inst.snode)
1901
          nodeslist.append(inst.snode)
1902

    
1903
      self.needed_locks[locking.LEVEL_NODE] = nodeslist
1904
      # Lock resources of instance's primary and secondary nodes (copy to
1905
      # prevent accidential modification)
1906
      self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)
1907

    
1908
  def CheckPrereq(self):
1909
    """Check prerequisite.
1910

1911
    """
1912
    cluster = self.cfg.GetClusterInfo()
1913
    default_vg = self.cfg.GetVGName()
1914
    ec_id = self.proc.GetECId()
1915

    
1916
    if self.op.opportunistic_locking:
1917
      # Only consider nodes for which a lock is held
1918
      node_whitelist = self.cfg.GetNodeNames(
1919
                         list(self.owned_locks(locking.LEVEL_NODE)))
1920
    else:
1921
      node_whitelist = None
1922

    
1923
    insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
1924
                                         _ComputeNics(op, cluster, None,
1925
                                                      self.cfg, ec_id),
1926
                                         _ComputeFullBeParams(op, cluster),
1927
                                         node_whitelist)
1928
             for op in self.op.instances]
1929

    
1930
    req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
1931
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)
1932

    
1933
    ial.Run(self.op.iallocator)
1934

    
1935
    if not ial.success:
1936
      raise errors.OpPrereqError("Can't compute nodes using"
1937
                                 " iallocator '%s': %s" %
1938
                                 (self.op.iallocator, ial.info),
1939
                                 errors.ECODE_NORES)
1940

    
1941
    self.ia_result = ial.result
1942

    
1943
    if self.op.dry_run:
1944
      self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
1945
        constants.JOB_IDS_KEY: [],
1946
        })
1947

    
1948
  def _ConstructPartialResult(self):
1949
    """Contructs the partial result.
1950

1951
    """
1952
    (allocatable, failed) = self.ia_result
1953
    return {
1954
      opcodes.OpInstanceMultiAlloc.ALLOCATABLE_KEY:
1955
        map(compat.fst, allocatable),
1956
      opcodes.OpInstanceMultiAlloc.FAILED_KEY: failed,
1957
      }
1958

    
1959
  def Exec(self, feedback_fn):
1960
    """Executes the opcode.
1961

1962
    """
1963
    op2inst = dict((op.instance_name, op) for op in self.op.instances)
1964
    (allocatable, failed) = self.ia_result
1965

    
1966
    jobs = []
1967
    for (name, node_names) in allocatable:
1968
      op = op2inst.pop(name)
1969

    
1970
      (op.pnode_uuid, op.pnode) = \
1971
        ExpandNodeUuidAndName(self.cfg, None, node_names[0])
1972
      if len(node_names) > 1:
1973
        (op.snode_uuid, op.snode) = \
1974
          ExpandNodeUuidAndName(self.cfg, None, node_names[1])
1975

    
1976
      jobs.append([op])
1977

    
1978
    missing = set(op2inst.keys()) - set(failed)
1979
    assert not missing, \
1980
      "Iallocator did return incomplete result: %s" % utils.CommaJoin(missing)
1981

    
1982
    return ResultWithJobs(jobs, **self._ConstructPartialResult())
1983

    
1984

    
1985
class _InstNicModPrivate:
1986
  """Data structure for network interface modifications.
1987

1988
  Used by L{LUInstanceSetParams}.
1989

1990
  """
1991
  def __init__(self):
1992
    self.params = None
1993
    self.filled = None
1994

    
1995

    
1996
def _PrepareContainerMods(mods, private_fn):
1997
  """Prepares a list of container modifications by adding a private data field.
1998

1999
  @type mods: list of tuples; (operation, index, parameters)
2000
  @param mods: List of modifications
2001
  @type private_fn: callable or None
2002
  @param private_fn: Callable for constructing a private data field for a
2003
    modification
2004
  @rtype: list
2005

2006
  """
2007
  if private_fn is None:
2008
    fn = lambda: None
2009
  else:
2010
    fn = private_fn
2011

    
2012
  return [(op, idx, params, fn()) for (op, idx, params) in mods]
2013

    
2014

    
2015
def _CheckNodesPhysicalCPUs(lu, node_uuids, requested, hypervisor_specs):
2016
  """Checks if nodes have enough physical CPUs
2017

2018
  This function checks if all given nodes have the needed number of
2019
  physical CPUs. In case any node has less CPUs or we cannot get the
2020
  information from the node, this function raises an OpPrereqError
2021
  exception.
2022

2023
  @type lu: C{LogicalUnit}
2024
  @param lu: a logical unit from which we get configuration data
2025
  @type node_uuids: C{list}
2026
  @param node_uuids: the list of node UUIDs to check
2027
  @type requested: C{int}
2028
  @param requested: the minimum acceptable number of physical CPUs
2029
  @type hypervisor_specs: list of pairs (string, dict of strings)
2030
  @param hypervisor_specs: list of hypervisor specifications in
2031
      pairs (hypervisor_name, hvparams)
2032
  @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
2033
      or we cannot check the node
2034

2035
  """
2036
  nodeinfo = lu.rpc.call_node_info(node_uuids, None, hypervisor_specs)
2037
  for node_uuid in node_uuids:
2038
    info = nodeinfo[node_uuid]
2039
    node_name = lu.cfg.GetNodeName(node_uuid)
2040
    info.Raise("Cannot get current information from node %s" % node_name,
2041
               prereq=True, ecode=errors.ECODE_ENVIRON)
2042
    (_, _, (hv_info, )) = info.payload
2043
    num_cpus = hv_info.get("cpu_total", None)
2044
    if not isinstance(num_cpus, int):
2045
      raise errors.OpPrereqError("Can't compute the number of physical CPUs"
2046
                                 " on node %s, result was '%s'" %
2047
                                 (node_name, num_cpus), errors.ECODE_ENVIRON)
2048
    if requested > num_cpus:
2049
      raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
2050
                                 "required" % (node_name, num_cpus, requested),
2051
                                 errors.ECODE_NORES)
2052

    
2053

    
2054
def GetItemFromContainer(identifier, kind, container):
2055
  """Return the item refered by the identifier.
2056

2057
  @type identifier: string
2058
  @param identifier: Item index or name or UUID
2059
  @type kind: string
2060
  @param kind: One-word item description
2061
  @type container: list
2062
  @param container: Container to get the item from
2063

2064
  """
2065
  # Index
2066
  try:
2067
    idx = int(identifier)
2068
    if idx == -1:
2069
      # Append
2070
      absidx = len(container) - 1
2071
    elif idx < 0:
2072
      raise IndexError("Not accepting negative indices other than -1")
2073
    elif idx > len(container):
2074
      raise IndexError("Got %s index %s, but there are only %s" %
2075
                       (kind, idx, len(container)))
2076
    else:
2077
      absidx = idx
2078
    return (absidx, container[idx])
2079
  except ValueError:
2080
    pass
2081

    
2082
  for idx, item in enumerate(container):
2083
    if item.uuid == identifier or item.name == identifier:
2084
      return (idx, item)
2085

    
2086
  raise errors.OpPrereqError("Cannot find %s with identifier %s" %
2087
                             (kind, identifier), errors.ECODE_NOENT)
2088

    
2089

    
2090
def _ApplyContainerMods(kind, container, chgdesc, mods,
2091
                        create_fn, modify_fn, remove_fn):
2092
  """Applies descriptions in C{mods} to C{container}.
2093

2094
  @type kind: string
2095
  @param kind: One-word item description
2096
  @type container: list
2097
  @param container: Container to modify
2098
  @type chgdesc: None or list
2099
  @param chgdesc: List of applied changes
2100
  @type mods: list
2101
  @param mods: Modifications as returned by L{_PrepareContainerMods}
2102
  @type create_fn: callable
2103
  @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
2104
    receives absolute item index, parameters and private data object as added
2105
    by L{_PrepareContainerMods}, returns tuple containing new item and changes
2106
    as list
2107
  @type modify_fn: callable
2108
  @param modify_fn: Callback for modifying an existing item
2109
    (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
2110
    and private data object as added by L{_PrepareContainerMods}, returns
2111
    changes as list
2112
  @type remove_fn: callable
2113
  @param remove_fn: Callback on removing item; receives absolute item index,
2114
    item and private data object as added by L{_PrepareContainerMods}
2115

2116
  """
2117
  for (op, identifier, params, private) in mods:
2118
    changes = None
2119

    
2120
    if op == constants.DDM_ADD:
2121
      # Calculate where item will be added
2122
      # When adding an item, identifier can only be an index
2123
      try:
2124
        idx = int(identifier)
2125
      except ValueError:
2126
        raise errors.OpPrereqError("Only possitive integer or -1 is accepted as"
2127
                                   " identifier for %s" % constants.DDM_ADD,
2128
                                   errors.ECODE_INVAL)
2129
      if idx == -1:
2130
        addidx = len(container)
2131
      else:
2132
        if idx < 0:
2133
          raise IndexError("Not accepting negative indices other than -1")
2134
        elif idx > len(container):
2135
          raise IndexError("Got %s index %s, but there are only %s" %
2136
                           (kind, idx, len(container)))
2137
        addidx = idx
2138

    
2139
      if create_fn is None:
2140
        item = params
2141
      else:
2142
        (item, changes) = create_fn(addidx, params, private)
2143

    
2144
      if idx == -1:
2145
        container.append(item)
2146
      else:
2147
        assert idx >= 0
2148
        assert idx <= len(container)
2149
        # list.insert does so before the specified index
2150
        container.insert(idx, item)
2151
    else:
2152
      # Retrieve existing item
2153
      (absidx, item) = GetItemFromContainer(identifier, kind, container)
2154

    
2155
      if op == constants.DDM_REMOVE:
2156
        assert not params
2157

    
2158
        if remove_fn is not None:
2159
          remove_fn(absidx, item, private)
2160

    
2161
        changes = [("%s/%s" % (kind, absidx), "remove")]
2162

    
2163
        assert container[absidx] == item
2164
        del container[absidx]
2165
      elif op == constants.DDM_MODIFY:
2166
        if modify_fn is not None:
2167
          changes = modify_fn(absidx, item, params, private)
2168
      else:
2169
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2170

    
2171
    assert _TApplyContModsCbChanges(changes)
2172

    
2173
    if not (chgdesc is None or changes is None):
2174
      chgdesc.extend(changes)
2175

    
2176

    
2177
def _UpdateIvNames(base_index, disks):
2178
  """Updates the C{iv_name} attribute of disks.
2179

2180
  @type disks: list of L{objects.Disk}
2181

2182
  """
2183
  for (idx, disk) in enumerate(disks):
2184
    disk.iv_name = "disk/%s" % (base_index + idx, )
2185

    
2186

    
2187
class LUInstanceSetParams(LogicalUnit):
2188
  """Modifies an instances's parameters.
2189

2190
  """
2191
  HPATH = "instance-modify"
2192
  HTYPE = constants.HTYPE_INSTANCE
2193
  REQ_BGL = False
2194

    
2195
  @staticmethod
2196
  def _UpgradeDiskNicMods(kind, mods, verify_fn):
2197
    assert ht.TList(mods)
2198
    assert not mods or len(mods[0]) in (2, 3)
2199

    
2200
    if mods and len(mods[0]) == 2:
2201
      result = []
2202

    
2203
      addremove = 0
2204
      for op, params in mods:
2205
        if op in (constants.DDM_ADD, constants.DDM_REMOVE):
2206
          result.append((op, -1, params))
2207
          addremove += 1
2208

    
2209
          if addremove > 1:
2210
            raise errors.OpPrereqError("Only one %s add or remove operation is"
2211
                                       " supported at a time" % kind,
2212
                                       errors.ECODE_INVAL)
2213
        else:
2214
          result.append((constants.DDM_MODIFY, op, params))
2215

    
2216
      assert verify_fn(result)
2217
    else:
2218
      result = mods
2219

    
2220
    return result
2221

    
2222
  @staticmethod
2223
  def _CheckMods(kind, mods, key_types, item_fn):
2224
    """Ensures requested disk/NIC modifications are valid.
2225

2226
    """
2227
    for (op, _, params) in mods:
2228
      assert ht.TDict(params)
2229

    
2230
      # If 'key_types' is an empty dict, we assume we have an
2231
      # 'ext' template and thus do not ForceDictType
2232
      if key_types:
2233
        utils.ForceDictType(params, key_types)
2234

    
2235
      if op == constants.DDM_REMOVE:
2236
        if params:
2237
          raise errors.OpPrereqError("No settings should be passed when"
2238
                                     " removing a %s" % kind,
2239
                                     errors.ECODE_INVAL)
2240
      elif op in (constants.DDM_ADD, constants.DDM_MODIFY):
2241
        item_fn(op, params)
2242
      else:
2243
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2244

    
2245
  @staticmethod
2246
  def _VerifyDiskModification(op, params, excl_stor):
2247
    """Verifies a disk modification.
2248

2249
    """
2250
    if op == constants.DDM_ADD:
2251
      mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
2252
      if mode not in constants.DISK_ACCESS_SET:
2253
        raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
2254
                                   errors.ECODE_INVAL)
2255

    
2256
      size = params.get(constants.IDISK_SIZE, None)
2257
      if size is None:
2258
        raise errors.OpPrereqError("Required disk parameter '%s' missing" %
2259
                                   constants.IDISK_SIZE, errors.ECODE_INVAL)
2260

    
2261
      try:
2262
        size = int(size)
2263
      except (TypeError, ValueError), err:
2264
        raise errors.OpPrereqError("Invalid disk size parameter: %s" % err,
2265
                                   errors.ECODE_INVAL)
2266

    
2267
      params[constants.IDISK_SIZE] = size
2268
      name = params.get(constants.IDISK_NAME, None)
2269
      if name is not None and name.lower() == constants.VALUE_NONE:
2270
        params[constants.IDISK_NAME] = None
2271

    
2272
      CheckSpindlesExclusiveStorage(params, excl_stor, True)
2273

    
2274
    elif op == constants.DDM_MODIFY:
2275
      if constants.IDISK_SIZE in params:
2276
        raise errors.OpPrereqError("Disk size change not possible, use"
2277
                                   " grow-disk", errors.ECODE_INVAL)
2278
      if len(params) > 2:
2279
        raise errors.OpPrereqError("Disk modification doesn't support"
2280
                                   " additional arbitrary parameters",
2281
                                   errors.ECODE_INVAL)
2282
      name = params.get(constants.IDISK_NAME, None)
2283
      if name is not None and name.lower() == constants.VALUE_NONE:
2284
        params[constants.IDISK_NAME] = None
2285

    
2286
  @staticmethod
2287
  def _VerifyNicModification(op, params):
2288
    """Verifies a network interface modification.
2289

2290
    """
2291
    if op in (constants.DDM_ADD, constants.DDM_MODIFY):
2292
      ip = params.get(constants.INIC_IP, None)
2293
      name = params.get(constants.INIC_NAME, None)
2294
      req_net = params.get(constants.INIC_NETWORK, None)
2295
      link = params.get(constants.NIC_LINK, None)
2296
      mode = params.get(constants.NIC_MODE, None)
2297
      if name is not None and name.lower() == constants.VALUE_NONE:
2298
        params[constants.INIC_NAME] = None
2299
      if req_net is not None:
2300
        if req_net.lower() == constants.VALUE_NONE:
2301
          params[constants.INIC_NETWORK] = None
2302
          req_net = None
2303
        elif link is not None or mode is not None:
2304
          raise errors.OpPrereqError("If network is given"
2305
                                     " mode or link should not",
2306
                                     errors.ECODE_INVAL)
2307

    
2308
      if op == constants.DDM_ADD:
2309
        macaddr = params.get(constants.INIC_MAC, None)
2310
        if macaddr is None:
2311
          params[constants.INIC_MAC] = constants.VALUE_AUTO
2312

    
2313
      if ip is not None:
2314
        if ip.lower() == constants.VALUE_NONE:
2315
          params[constants.INIC_IP] = None
2316
        else:
2317
          if ip.lower() == constants.NIC_IP_POOL:
2318
            if op == constants.DDM_ADD and req_net is None:
2319
              raise errors.OpPrereqError("If ip=pool, parameter network"
2320
                                         " cannot be none",
2321
                                         errors.ECODE_INVAL)
2322
          else:
2323
            if not netutils.IPAddress.IsValid(ip):
2324
              raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
2325
                                         errors.ECODE_INVAL)
2326

    
2327
      if constants.INIC_MAC in params:
2328
        macaddr = params[constants.INIC_MAC]
2329
        if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2330
          macaddr = utils.NormalizeAndValidateMac(macaddr)
2331

    
2332
        if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
2333
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
2334
                                     " modifying an existing NIC",
2335
                                     errors.ECODE_INVAL)
2336

    
2337
  def CheckArguments(self):
2338
    if not (self.op.nics or self.op.disks or self.op.disk_template or
2339
            self.op.hvparams or self.op.beparams or self.op.os_name or
2340
            self.op.offline is not None or self.op.runtime_mem or
2341
            self.op.pnode):
2342
      raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
2343

    
2344
    if self.op.hvparams:
2345
      CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
2346
                           "hypervisor", "instance", "cluster")
2347

    
2348
    self.op.disks = self._UpgradeDiskNicMods(
2349
      "disk", self.op.disks, opcodes.OpInstanceSetParams.TestDiskModifications)
2350
    self.op.nics = self._UpgradeDiskNicMods(
2351
      "NIC", self.op.nics, opcodes.OpInstanceSetParams.TestNicModifications)
2352

    
2353
    if self.op.disks and self.op.disk_template is not None:
2354
      raise errors.OpPrereqError("Disk template conversion and other disk"
2355
                                 " changes not supported at the same time",
2356
                                 errors.ECODE_INVAL)
2357

    
2358
    if (self.op.disk_template and
2359
        self.op.disk_template in constants.DTS_INT_MIRROR and
2360
        self.op.remote_node is None):
2361
      raise errors.OpPrereqError("Changing the disk template to a mirrored"
2362
                                 " one requires specifying a secondary node",
2363
                                 errors.ECODE_INVAL)
2364

    
2365
    # Check NIC modifications
2366
    self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
2367
                    self._VerifyNicModification)
2368

    
2369
    if self.op.pnode:
2370
      (self.op.pnode_uuid, self.op.pnode) = \
2371
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
2372

    
2373
  def ExpandNames(self):
2374
    self._ExpandAndLockInstance()
2375
    self.needed_locks[locking.LEVEL_NODEGROUP] = []
2376
    # Can't even acquire node locks in shared mode as upcoming changes in
2377
    # Ganeti 2.6 will start to modify the node object on disk conversion
2378
    self.needed_locks[locking.LEVEL_NODE] = []
2379
    self.needed_locks[locking.LEVEL_NODE_RES] = []
2380
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2381
    # Look node group to look up the ipolicy
2382
    self.share_locks[locking.LEVEL_NODEGROUP] = 1
2383

    
2384
  def DeclareLocks(self, level):
2385
    if level == locking.LEVEL_NODEGROUP:
2386
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
2387
      # Acquire locks for the instance's nodegroups optimistically. Needs
2388
      # to be verified in CheckPrereq
2389
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
2390
        self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
2391
    elif level == locking.LEVEL_NODE:
2392
      self._LockInstancesNodes()
2393
      if self.op.disk_template and self.op.remote_node:
2394
        (self.op.remote_node_uuid, self.op.remote_node) = \
2395
          ExpandNodeUuidAndName(self.cfg, self.op.remote_node_uuid,
2396
                                self.op.remote_node)
2397
        self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node_uuid)
2398
    elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
2399
      # Copy node locks
2400
      self.needed_locks[locking.LEVEL_NODE_RES] = \
2401
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
2402

    
2403
  def BuildHooksEnv(self):
2404
    """Build hooks env.
2405

2406
    This runs on the master, primary and secondaries.
2407

2408
    """
2409
    args = {}
2410
    if constants.BE_MINMEM in self.be_new:
2411
      args["minmem"] = self.be_new[constants.BE_MINMEM]
2412
    if constants.BE_MAXMEM in self.be_new:
2413
      args["maxmem"] = self.be_new[constants.BE_MAXMEM]
2414
    if constants.BE_VCPUS in self.be_new:
2415
      args["vcpus"] = self.be_new[constants.BE_VCPUS]
2416
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
2417
    # information at all.
2418

    
2419
    if self._new_nics is not None:
2420
      nics = []
2421

    
2422
      for nic in self._new_nics:
2423
        n = copy.deepcopy(nic)
2424
        nicparams = self.cluster.SimpleFillNIC(n.nicparams)
2425
        n.nicparams = nicparams
2426
        nics.append(NICToTuple(self, n))
2427

    
2428
      args["nics"] = nics
2429

    
2430
    env = BuildInstanceHookEnvByObject(self, self.instance, override=args)
2431
    if self.op.disk_template:
2432
      env["NEW_DISK_TEMPLATE"] = self.op.disk_template
2433
    if self.op.runtime_mem:
2434
      env["RUNTIME_MEMORY"] = self.op.runtime_mem
2435

    
2436
    return env
2437

    
2438
  def BuildHooksNodes(self):
2439
    """Build hooks nodes.
2440

2441
    """
2442
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
2443
    return (nl, nl)
2444

    
2445
  def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
2446
                              old_params, cluster, pnode_uuid):
2447

    
2448
    update_params_dict = dict([(key, params[key])
2449
                               for key in constants.NICS_PARAMETERS
2450
                               if key in params])
2451

    
2452
    req_link = update_params_dict.get(constants.NIC_LINK, None)
2453
    req_mode = update_params_dict.get(constants.NIC_MODE, None)
2454

    
2455
    new_net_uuid = None
2456
    new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid)
2457
    if new_net_uuid_or_name:
2458
      new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name)
      new_net_obj = self.cfg.GetNetwork(new_net_uuid)

    if old_net_uuid:
      old_net_obj = self.cfg.GetNetwork(old_net_uuid)

    if new_net_uuid:
      netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode_uuid)
      if not netparams:
        raise errors.OpPrereqError("No netparams found for the network"
                                   " %s, probably not connected" %
                                   new_net_obj.name, errors.ECODE_INVAL)
      new_params = dict(netparams)
    else:
      new_params = GetUpdatedParams(old_params, update_params_dict)

    utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)

    new_filled_params = cluster.SimpleFillNIC(new_params)
    objects.NIC.CheckParameterSyntax(new_filled_params)

    new_mode = new_filled_params[constants.NIC_MODE]
    if new_mode == constants.NIC_MODE_BRIDGED:
      bridge = new_filled_params[constants.NIC_LINK]
      msg = self.rpc.call_bridges_exist(pnode_uuid, [bridge]).fail_msg
      if msg:
        msg = "Error checking bridges on node '%s': %s" % \
                (self.cfg.GetNodeName(pnode_uuid), msg)
        if self.op.force:
          self.warn.append(msg)
        else:
          raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)

    elif new_mode == constants.NIC_MODE_ROUTED:
      ip = params.get(constants.INIC_IP, old_ip)
      if ip is None:
        raise errors.OpPrereqError("Cannot set the NIC IP address to None"
                                   " on a routed NIC", errors.ECODE_INVAL)

    elif new_mode == constants.NIC_MODE_OVS:
      # TODO: check OVS link
      self.LogInfo("OVS links are currently not checked for correctness")

    if constants.INIC_MAC in params:
      mac = params[constants.INIC_MAC]
      if mac is None:
        raise errors.OpPrereqError("Cannot unset the NIC MAC address",
                                   errors.ECODE_INVAL)
      elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        # otherwise generate the MAC address
        params[constants.INIC_MAC] = \
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
      else:
        # or validate/reserve the current one
        try:
          self.cfg.ReserveMAC(mac, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("MAC address '%s' already in use"
                                     " in cluster" % mac,
                                     errors.ECODE_NOTUNIQUE)
    elif new_net_uuid != old_net_uuid:

      def get_net_prefix(net_uuid):
        mac_prefix = None
        if net_uuid:
          nobj = self.cfg.GetNetwork(net_uuid)
          mac_prefix = nobj.mac_prefix

        return mac_prefix

      new_prefix = get_net_prefix(new_net_uuid)
      old_prefix = get_net_prefix(old_net_uuid)
      if old_prefix != new_prefix:
        params[constants.INIC_MAC] = \
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())

    # if there is a change in (ip, network) tuple
    new_ip = params.get(constants.INIC_IP, old_ip)
    if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
      if new_ip:
        # if IP is pool then require a network and generate one IP
        if new_ip.lower() == constants.NIC_IP_POOL:
          if new_net_uuid:
            try:
              new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("Unable to get a free IP"
                                         " from the address pool",
                                         errors.ECODE_STATE)
            self.LogInfo("Chose IP %s from network %s",
                         new_ip,
                         new_net_obj.name)
            params[constants.INIC_IP] = new_ip
          else:
            raise errors.OpPrereqError("ip=pool, but no network found",
                                       errors.ECODE_INVAL)
        # otherwise, reserve the new IP in the new network, if there is one
        elif new_net_uuid:
          try:
            self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId())
            self.LogInfo("Reserving IP %s in network %s",
                         new_ip, new_net_obj.name)
          except errors.ReservationError:
            raise errors.OpPrereqError("IP %s not available in network %s" %
                                       (new_ip, new_net_obj.name),
                                       errors.ECODE_NOTUNIQUE)
        # new network is None so check if new IP is a conflicting IP
        elif self.op.conflicts_check:
          _CheckForConflictingIp(self, new_ip, pnode_uuid)

      # release old IP if old network is not None
      if old_ip and old_net_uuid:
        try:
          self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId())
        except errors.AddressPoolError:
          logging.warning("Release IP %s not contained in network %s",
                          old_ip, old_net_obj.name)

    # there are no changes in (ip, network) tuple and old network is not None
    elif (old_net_uuid is not None and
          (req_link is not None or req_mode is not None)):
      raise errors.OpPrereqError("Not allowed to change link or mode of"
                                 " a NIC that is connected to a network",
                                 errors.ECODE_INVAL)

    private.params = new_params
    private.filled = new_filled_params

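  # Prereq helpers: the disk-template checks below are split out of
  # CheckPrereq so that they only run when a new disk template was actually
  # requested.  They validate the conversion pair, require the instance to
  # be down and, for mirrored templates, vet the requested secondary node
  # (online, not drained, enough free disk, ipolicy, exclusive storage).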
  def _PreCheckDiskTemplate(self, pnode_info):
    """CheckPrereq checks related to a new disk template."""
    # Arguments are passed to avoid configuration lookups
    pnode_uuid = self.instance.primary_node
    if self.instance.disk_template == self.op.disk_template:
      raise errors.OpPrereqError("Instance already has disk template %s" %
                                 self.instance.disk_template,
                                 errors.ECODE_INVAL)

    if (self.instance.disk_template,
        self.op.disk_template) not in self._DISK_CONVERSIONS:
      raise errors.OpPrereqError("Unsupported disk template conversion from"
                                 " %s to %s" % (self.instance.disk_template,
                                                self.op.disk_template),
                                 errors.ECODE_INVAL)
    CheckInstanceState(self, self.instance, INSTANCE_DOWN,
                       msg="cannot change disk template")
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.remote_node_uuid == pnode_uuid:
        raise errors.OpPrereqError("Given new secondary node %s is the same"
                                   " as the primary node of the instance" %
                                   self.op.remote_node, errors.ECODE_STATE)
      CheckNodeOnline(self, self.op.remote_node_uuid)
      CheckNodeNotDrained(self, self.op.remote_node_uuid)
      # FIXME: here we assume that the old instance type is DT_PLAIN
      assert self.instance.disk_template == constants.DT_PLAIN
      disks = [{constants.IDISK_SIZE: d.size,
                constants.IDISK_VG: d.logical_id[0]}
               for d in self.instance.disks]
      required = ComputeDiskSizePerVG(self.op.disk_template, disks)
      CheckNodesFreeDiskPerVG(self, [self.op.remote_node_uuid], required)

      snode_info = self.cfg.GetNodeInfo(self.op.remote_node_uuid)
      snode_group = self.cfg.GetNodeGroup(snode_info.group)
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
                                                              snode_group)
      CheckTargetNodeIPolicy(self, ipolicy, self.instance, snode_info, self.cfg,
                             ignore=self.op.ignore_ipolicy)
      if pnode_info.group != snode_info.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    if not self.op.disk_template in constants.DTS_EXCL_STORAGE:
      # Make sure none of the nodes require exclusive storage
      nodes = [pnode_info]
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        assert snode_info
        nodes.append(snode_info)
      has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
      if compat.any(map(has_es, nodes)):
        errmsg = ("Cannot convert disk template from %s to %s when exclusive"
                  " storage is enabled" % (self.instance.disk_template,
                                           self.op.disk_template))
        raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)

  def _PreCheckDisks(self, ispec):
    """CheckPrereq checks related to disk changes.

    @type ispec: dict
    @param ispec: instance specs to be updated with the new disks

    """
    self.diskparams = self.cfg.GetInstanceDiskParams(self.instance)

    excl_stor = compat.any(
      rpc.GetExclusiveStorageForNodes(self.cfg,
                                      self.instance.all_nodes).values()
      )

    # Check disk modifications. This is done here and not in CheckArguments
    # (as with NICs), because we need to know the instance's disk template
    ver_fn = lambda op, par: self._VerifyDiskModification(op, par, excl_stor)
    if self.instance.disk_template == constants.DT_EXT:
      self._CheckMods("disk", self.op.disks, {}, ver_fn)
    else:
      self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
                      ver_fn)

    self.diskmod = _PrepareContainerMods(self.op.disks, None)

    # Check the validity of the `provider' parameter
    if self.instance.disk_template == constants.DT_EXT:
      for mod in self.diskmod:
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
        if mod[0] == constants.DDM_ADD:
          if ext_provider is None:
            raise errors.OpPrereqError("Instance template is '%s' and parameter"
                                       " '%s' missing, during disk add" %
                                       (constants.DT_EXT,
                                        constants.IDISK_PROVIDER),
                                       errors.ECODE_NOENT)
        elif mod[0] == constants.DDM_MODIFY:
          if ext_provider:
            raise errors.OpPrereqError("Parameter '%s' is invalid during disk"
                                       " modification" %
                                       constants.IDISK_PROVIDER,
                                       errors.ECODE_INVAL)
    else:
      for mod in self.diskmod:
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
        if ext_provider is not None:
          raise errors.OpPrereqError("Parameter '%s' is only valid for"
                                     " instances of type '%s'" %
                                     (constants.IDISK_PROVIDER,
                                      constants.DT_EXT),
                                     errors.ECODE_INVAL)

    if self.op.disks and self.instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Disk operations not supported for"
                                 " diskless instances", errors.ECODE_INVAL)

    def _PrepareDiskMod(_, disk, params, __):
      disk.name = params.get(constants.IDISK_NAME, None)

    # Verify disk changes (operating on a copy)
    disks = copy.deepcopy(self.instance.disks)
    _ApplyContainerMods("disk", disks, None, self.diskmod, None,
                        _PrepareDiskMod, None)
    utils.ValidateDeviceNames("disk", disks)
    if len(disks) > constants.MAX_DISKS:
      raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
                                 " more" % constants.MAX_DISKS,
                                 errors.ECODE_STATE)
    disk_sizes = [disk.size for disk in self.instance.disks]
    disk_sizes.extend(params["size"] for (op, idx, params, private) in
                      self.diskmod if op == constants.DDM_ADD)
    ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
    ispec[constants.ISPEC_DISK_SIZE] = disk_sizes

    if self.op.offline is not None and self.op.offline:
      CheckInstanceState(self, self.instance, CAN_CHANGE_INSTANCE_OFFLINE,
                         msg="can't change to offline")

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the instance list against the existing names.

    """
    assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    self.cluster = self.cfg.GetClusterInfo()

    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    pnode_uuid = self.instance.primary_node

    self.warn = []

    if (self.op.pnode_uuid is not None and self.op.pnode_uuid != pnode_uuid and
        not self.op.force):
      # verify that the instance is not up
      instance_info = self.rpc.call_instance_info(
          pnode_uuid, self.instance.name, self.instance.hypervisor,
          self.instance.hvparams)
      if instance_info.fail_msg:
        self.warn.append("Can't get instance runtime information: %s" %
                         instance_info.fail_msg)
      elif instance_info.payload:
        raise errors.OpPrereqError("Instance is still running on %s" %
                                   self.cfg.GetNodeName(pnode_uuid),
                                   errors.ECODE_STATE)

    assert pnode_uuid in self.owned_locks(locking.LEVEL_NODE)
    node_uuids = list(self.instance.all_nodes)
    pnode_info = self.cfg.GetNodeInfo(pnode_uuid)

    #_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
    assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
    group_info = self.cfg.GetNodeGroup(pnode_info.group)

    # dictionary with instance information after the modification
    ispec = {}

    # Prepare NIC modifications
    self.nicmod = _PrepareContainerMods(self.op.nics, _InstNicModPrivate)

    # OS change
    if self.op.os_name and not self.op.force:
      CheckNodeHasOS(self, self.instance.primary_node, self.op.os_name,
                     self.op.force_variant)
      instance_os = self.op.os_name
    else:
      instance_os = self.instance.os

    assert not (self.op.disk_template and self.op.disks), \
      "Can't modify disk template and apply disk changes at the same time"

    if self.op.disk_template:
      self._PreCheckDiskTemplate(pnode_info)

    self._PreCheckDisks(ispec)

    # hvparams processing
    if self.op.hvparams:
      hv_type = self.instance.hypervisor
      i_hvdict = GetUpdatedParams(self.instance.hvparams, self.op.hvparams)
      utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
      hv_new = self.cluster.SimpleFillHV(hv_type, self.instance.os, i_hvdict)

      # local check
      hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
      CheckHVParams(self, node_uuids, self.instance.hypervisor, hv_new)
      self.hv_proposed = self.hv_new = hv_new # the new actual values
      self.hv_inst = i_hvdict # the new dict (without defaults)
    else:
      self.hv_proposed = self.cluster.SimpleFillHV(self.instance.hypervisor,
                                                   self.instance.os,
                                                   self.instance.hvparams)
      self.hv_new = self.hv_inst = {}

    # beparams processing
    if self.op.beparams:
      i_bedict = GetUpdatedParams(self.instance.beparams, self.op.beparams,
                                  use_none=True)
      objects.UpgradeBeParams(i_bedict)
      utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
      be_new = self.cluster.SimpleFillBE(i_bedict)
      self.be_proposed = self.be_new = be_new # the new actual values
      self.be_inst = i_bedict # the new dict (without defaults)
    else:
      self.be_new = self.be_inst = {}
      self.be_proposed = self.cluster.SimpleFillBE(self.instance.beparams)
    be_old = self.cluster.FillBE(self.instance)

    # CPU param validation -- checking every time a parameter is
    # changed to cover all cases where either CPU mask or vcpus have
    # changed
    if (constants.BE_VCPUS in self.be_proposed and
        constants.HV_CPU_MASK in self.hv_proposed):
      cpu_list = \
        utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
      # Verify mask is consistent with number of vCPUs. Can skip this
      # test if only 1 entry in the CPU mask, which means same mask
      # is applied to all vCPUs.
      if (len(cpu_list) > 1 and
          len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
        raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
                                   " CPU mask [%s]" %
                                   (self.be_proposed[constants.BE_VCPUS],
                                    self.hv_proposed[constants.HV_CPU_MASK]),
                                   errors.ECODE_INVAL)

      # Only perform this test if a new CPU mask is given
      if constants.HV_CPU_MASK in self.hv_new:
        # Calculate the largest CPU number requested
        max_requested_cpu = max(map(max, cpu_list))
        # Check that all of the instance's nodes have enough physical CPUs to
        # satisfy the requested CPU mask
        hvspecs = [(self.instance.hypervisor,
                    self.cfg.GetClusterInfo()
                      .hvparams[self.instance.hypervisor])]
        _CheckNodesPhysicalCPUs(self, self.instance.all_nodes,
                                max_requested_cpu + 1,
                                hvspecs)

    # osparams processing
    if self.op.osparams:
      i_osdict = GetUpdatedParams(self.instance.osparams, self.op.osparams)
      CheckOSParams(self, True, node_uuids, instance_os, i_osdict)
      self.os_inst = i_osdict # the new dict (without defaults)
    else:
      self.os_inst = {}

    #TODO(dynmem): do the appropriate check involving MINMEM
    if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
        be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
      mem_check_list = [pnode_uuid]
      if be_new[constants.BE_AUTO_BALANCE]:
        # either we changed auto_balance to yes or it was from before
        mem_check_list.extend(self.instance.secondary_nodes)
      instance_info = self.rpc.call_instance_info(
          pnode_uuid, self.instance.name, self.instance.hypervisor,
          self.instance.hvparams)
      hvspecs = [(self.instance.hypervisor,
                  self.cluster.hvparams[self.instance.hypervisor])]
      nodeinfo = self.rpc.call_node_info(mem_check_list, None,
                                         hvspecs)
      pninfo = nodeinfo[pnode_uuid]
      msg = pninfo.fail_msg
      if msg:
        # Assume the primary node is unreachable and go ahead
        self.warn.append("Can't get info from primary node %s: %s" %
                         (self.cfg.GetNodeName(pnode_uuid), msg))
      else:
        (_, _, (pnhvinfo, )) = pninfo.payload
        if not isinstance(pnhvinfo.get("memory_free", None), int):
          self.warn.append("Node data from primary node %s doesn't contain"
                           " free memory information" %
                           self.cfg.GetNodeName(pnode_uuid))
        elif instance_info.fail_msg:
          self.warn.append("Can't get instance runtime information: %s" %
                           instance_info.fail_msg)
        else:
          if instance_info.payload:
            current_mem = int(instance_info.payload["memory"])
          else:
            # Assume instance not running
            # (there is a slight race condition here, but it's not very
            # probable, and we have no other way to check)
            # TODO: Describe race condition
            current_mem = 0
          #TODO(dynmem): do the appropriate check involving MINMEM
          miss_mem = (be_new[constants.BE_MAXMEM] - current_mem -
                      pnhvinfo["memory_free"])
          if miss_mem > 0:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from starting, due to %d MB of memory"
                                       " missing on its primary node" %
                                       miss_mem, errors.ECODE_NORES)

      if be_new[constants.BE_AUTO_BALANCE]:
        for node_uuid, nres in nodeinfo.items():
          if node_uuid not in self.instance.secondary_nodes:
            continue
          nres.Raise("Can't get info from secondary node %s" %
                     self.cfg.GetNodeName(node_uuid), prereq=True,
                     ecode=errors.ECODE_STATE)
          (_, _, (nhvinfo, )) = nres.payload
          if not isinstance(nhvinfo.get("memory_free", None), int):
            raise errors.OpPrereqError("Secondary node %s didn't return free"
                                       " memory information" %
                                       self.cfg.GetNodeName(node_uuid),
                                       errors.ECODE_STATE)
          #TODO(dynmem): do the appropriate check involving MINMEM
          elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from failover to its secondary node"
                                       " %s, due to not enough memory" %
                                       self.cfg.GetNodeName(node_uuid),
                                       errors.ECODE_STATE)

    if self.op.runtime_mem:
      remote_info = self.rpc.call_instance_info(
         self.instance.primary_node, self.instance.name,
         self.instance.hypervisor,
         self.cluster.hvparams[self.instance.hypervisor])
      remote_info.Raise("Error checking node %s" %
                        self.cfg.GetNodeName(self.instance.primary_node))
      if not remote_info.payload: # not running already
        raise errors.OpPrereqError("Instance %s is not running" %
                                   self.instance.name, errors.ECODE_STATE)

      current_memory = remote_info.payload["memory"]
      if (not self.op.force and
           (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
            self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
        raise errors.OpPrereqError("Instance %s must have between %d"
                                   " and %d MB of memory unless --force is"
                                   " given" %
                                   (self.instance.name,
                                    self.be_proposed[constants.BE_MINMEM],
                                    self.be_proposed[constants.BE_MAXMEM]),
                                   errors.ECODE_INVAL)

      delta = self.op.runtime_mem - current_memory
      if delta > 0:
        CheckNodeFreeMemory(
            self, self.instance.primary_node,
            "ballooning memory for instance %s" % self.instance.name, delta,
            self.instance.hypervisor,
            self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])

    # make self.cluster visible in the functions below
    cluster = self.cluster

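    # The three nested helpers below adapt _PrepareNicModification to the
    # add/modify/remove callback signatures expected by _ApplyContainerMods;
    # they are only used for the verification pass on a copy of the NIC list
    # further down, which validates the changes and reserves MACs/IPs before
    # Exec runs.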
    def _PrepareNicCreate(_, params, private):
      self._PrepareNicModification(params, private, None, None,
                                   {}, cluster, pnode_uuid)
      return (None, None)

    def _PrepareNicMod(_, nic, params, private):
      self._PrepareNicModification(params, private, nic.ip, nic.network,
                                   nic.nicparams, cluster, pnode_uuid)
      return None

    def _PrepareNicRemove(_, params, __):
      ip = params.ip
      net = params.network
      if net is not None and ip is not None:
        self.cfg.ReleaseIp(net, ip, self.proc.GetECId())

    # Verify NIC changes (operating on copy)
    nics = self.instance.nics[:]
    _ApplyContainerMods("NIC", nics, None, self.nicmod,
                        _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
    if len(nics) > constants.MAX_NICS:
      raise errors.OpPrereqError("Instance has too many network interfaces"
                                 " (%d), cannot add more" % constants.MAX_NICS,
                                 errors.ECODE_STATE)

    # Pre-compute NIC changes (necessary to use result in hooks)
    self._nic_chgdesc = []
    if self.nicmod:
      # Operate on copies as this is still in prereq
      nics = [nic.Copy() for nic in self.instance.nics]
      _ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
                          self._CreateNewNic, self._ApplyNicMods, None)
      # Verify that NIC names are unique and valid
      utils.ValidateDeviceNames("NIC", nics)
      self._new_nics = nics
      ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
    else:
      self._new_nics = None
      ispec[constants.ISPEC_NIC_COUNT] = len(self.instance.nics)

    if not self.op.ignore_ipolicy:
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
                                                              group_info)

      # Fill ispec with backend parameters
      ispec[constants.ISPEC_SPINDLE_USE] = \
        self.be_new.get(constants.BE_SPINDLE_USE, None)
      ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS,
                                                         None)

      # Copy ispec to verify parameters with min/max values separately
      if self.op.disk_template:
        new_disk_template = self.op.disk_template
      else:
        new_disk_template = self.instance.disk_template
      ispec_max = ispec.copy()
      ispec_max[constants.ISPEC_MEM_SIZE] = \
        self.be_new.get(constants.BE_MAXMEM, None)
      res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,
                                                     new_disk_template)
      ispec_min = ispec.copy()
      ispec_min[constants.ISPEC_MEM_SIZE] = \
        self.be_new.get(constants.BE_MINMEM, None)
      res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,
                                                     new_disk_template)

      if (res_max or res_min):
        # FIXME: Improve error message by including information about whether
        # the upper or lower limit of the parameter fails the ipolicy.
        msg = ("Instance allocation to group %s (%s) violates policy: %s" %
               (group_info, group_info.name,
                utils.CommaJoin(set(res_max + res_min))))
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

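  # Disk template conversion helpers (called from Exec via _DISK_CONVERSIONS).
  # _ConvertPlainToDrbd generates a DRBD disk tree of the same sizes, creates
  # the missing volumes (the meta LV on the primary, data and meta LVs on the
  # new secondary), renames the existing LVs into place as the DRBD data
  # devices, creates the DRBD devices on top of them, updates the
  # configuration and finally waits for the new mirrors to sync.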
  def _ConvertPlainToDrbd(self, feedback_fn):
    """Converts an instance from plain to drbd.

    """
    feedback_fn("Converting template to drbd")
    pnode_uuid = self.instance.primary_node
    snode_uuid = self.op.remote_node_uuid

    assert self.instance.disk_template == constants.DT_PLAIN

    # create a fake disk info for _GenerateDiskTemplate
    disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
                  constants.IDISK_VG: d.logical_id[0],
                  constants.IDISK_NAME: d.name}
                 for d in self.instance.disks]
    new_disks = GenerateDiskTemplate(self, self.op.disk_template,
                                     self.instance.uuid, pnode_uuid,
                                     [snode_uuid], disk_info, None, None, 0,
                                     feedback_fn, self.diskparams)
    anno_disks = rpc.AnnotateDiskParams(constants.DT_DRBD8, new_disks,
                                        self.diskparams)
    p_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, pnode_uuid)
    s_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, snode_uuid)
    info = GetInstanceInfoText(self.instance)
    feedback_fn("Creating additional volumes...")
    # first, create the missing data and meta devices
    for disk in anno_disks:
      # unfortunately this is... not too nice
      CreateSingleBlockDev(self, pnode_uuid, self.instance, disk.children[1],
                           info, True, p_excl_stor)
      for child in disk.children:
        CreateSingleBlockDev(self, snode_uuid, self.instance, child, info, True,
                             s_excl_stor)
    # at this stage, all new LVs have been created, we can rename the
    # old ones
    feedback_fn("Renaming original volumes...")
    rename_list = [(o, n.children[0].logical_id)
                   for (o, n) in zip(self.instance.disks, new_disks)]
    result = self.rpc.call_blockdev_rename(pnode_uuid, rename_list)
    result.Raise("Failed to rename original LVs")

    feedback_fn("Initializing DRBD devices...")
    # all child devices are in place, we can now create the DRBD devices
    try:
      for disk in anno_disks:
        for (node_uuid, excl_stor) in [(pnode_uuid, p_excl_stor),
                                       (snode_uuid, s_excl_stor)]:
          f_create = node_uuid == pnode_uuid
          CreateSingleBlockDev(self, node_uuid, self.instance, disk, info,
                               f_create, excl_stor)
    except errors.GenericError, e:
      feedback_fn("Initializing of DRBD devices failed;"
                  " renaming back original volumes...")
      for disk in new_disks:
        self.cfg.SetDiskID(disk, pnode_uuid)
      rename_back_list = [(n.children[0], o.logical_id)
                          for (n, o) in zip(new_disks, self.instance.disks)]
      result = self.rpc.call_blockdev_rename(pnode_uuid, rename_back_list)
      result.Raise("Failed to rename LVs back after error %s" % str(e))
      raise

    # at this point, the instance has been modified
    self.instance.disk_template = constants.DT_DRBD8
    self.instance.disks = new_disks
    self.cfg.Update(self.instance, feedback_fn)

    # Release node locks while waiting for sync
    ReleaseLocks(self, locking.LEVEL_NODE)

    # disks are created, waiting for sync
    disk_abort = not WaitForSync(self, self.instance,
                                 oneshot=not self.op.wait_for_sync)
    if disk_abort:
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance, please cleanup manually")

    # Node resource locks will be released by caller

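  # _ConvertDrbdToPlain is the reverse operation: it keeps each DRBD disk's
  # data LV (children[0]) as the new plain disk, returns the DRBD TCP ports
  # to the pool, updates the configuration and then removes the now unused
  # volumes on the former secondary and the meta LVs on the primary.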
  def _ConvertDrbdToPlain(self, feedback_fn):
    """Converts an instance from drbd to plain.

    """
    assert len(self.instance.secondary_nodes) == 1
    assert self.instance.disk_template == constants.DT_DRBD8

    pnode_uuid = self.instance.primary_node
    snode_uuid = self.instance.secondary_nodes[0]
    feedback_fn("Converting template to plain")

    old_disks = AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
    new_disks = [d.children[0] for d in self.instance.disks]

    # copy over size, mode and name
    for parent, child in zip(old_disks, new_disks):
      child.size = parent.size
      child.mode = parent.mode
      child.name = parent.name

    # this is a DRBD disk, return its port to the pool
    # NOTE: this must be done right before the call to cfg.Update!
    for disk in old_disks:
      tcp_port = disk.logical_id[2]
      self.cfg.AddTcpUdpPort(tcp_port)

    # update instance structure
    self.instance.disks = new_disks
    self.instance.disk_template = constants.DT_PLAIN
    _UpdateIvNames(0, self.instance.disks)
    self.cfg.Update(self.instance, feedback_fn)

    # Release locks in case removing disks takes a while
    ReleaseLocks(self, locking.LEVEL_NODE)

    feedback_fn("Removing volumes on the secondary node...")
    for disk in old_disks:
      self.cfg.SetDiskID(disk, snode_uuid)
      msg = self.rpc.call_blockdev_remove(snode_uuid, disk).fail_msg
      if msg:
        self.LogWarning("Could not remove block device %s on node %s,"
                        " continuing anyway: %s", disk.iv_name,
                        self.cfg.GetNodeName(snode_uuid), msg)

    feedback_fn("Removing unneeded volumes on the primary node...")
    for idx, disk in enumerate(old_disks):
      meta = disk.children[1]
      self.cfg.SetDiskID(meta, pnode_uuid)
      msg = self.rpc.call_blockdev_remove(pnode_uuid, meta).fail_msg
      if msg:
        self.LogWarning("Could not remove metadata for disk %d on node %s,"
                        " continuing anyway: %s", idx,
                        self.cfg.GetNodeName(pnode_uuid), msg)

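  # Exec-phase disk callbacks for _ApplyContainerMods.  The add and modify
  # callbacks return a list of (name, value) pairs describing the change,
  # e.g. ("disk/2", "add:size=1024,mode=rw"), which _ApplyContainerMods
  # collects into the opcode result.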
  def _CreateNewDisk(self, idx, params, _):
    """Creates a new disk.

    """
    # add a new disk
    if self.instance.disk_template in constants.DTS_FILEBASED:
      (file_driver, file_path) = self.instance.disks[0].logical_id
      file_path = os.path.dirname(file_path)
    else:
      file_driver = file_path = None

    disk = \
      GenerateDiskTemplate(self, self.instance.disk_template,
                           self.instance.uuid, self.instance.primary_node,
                           self.instance.secondary_nodes, [params], file_path,
                           file_driver, idx, self.Log, self.diskparams)[0]

    new_disks = CreateDisks(self, self.instance, disks=[disk])

    if self.cluster.prealloc_wipe_disks:
      # Wipe new disk
      WipeOrCleanupDisks(self, self.instance,
                         disks=[(idx, disk, 0)],
                         cleanup=new_disks)

    return (disk, [
      ("disk/%d" % idx, "add:size=%s,mode=%s" % (disk.size, disk.mode)),
      ])

  @staticmethod
  def _ModifyDisk(idx, disk, params, _):
    """Modifies a disk.

    """
    changes = []
    mode = params.get(constants.IDISK_MODE, None)
    if mode:
      disk.mode = mode
      changes.append(("disk.mode/%d" % idx, disk.mode))

    name = params.get(constants.IDISK_NAME, None)
    disk.name = name
    changes.append(("disk.name/%d" % idx, disk.name))

    return changes

  def _RemoveDisk(self, idx, root, _):
    """Removes a disk.

    """
    (anno_disk,) = AnnotateDiskParams(self.instance, [root], self.cfg)
    for node_uuid, disk in anno_disk.ComputeNodeTree(
                             self.instance.primary_node):
      self.cfg.SetDiskID(disk, node_uuid)
      msg = self.rpc.call_blockdev_remove(node_uuid, disk).fail_msg
      if msg:
        self.LogWarning("Could not remove disk/%d on node '%s': %s,"
                        " continuing anyway", idx,
                        self.cfg.GetNodeName(node_uuid), msg)

    # if this is a DRBD disk, return its port to the pool
    if root.dev_type in constants.LDS_DRBD:
      self.cfg.AddTcpUdpPort(root.logical_id[2])

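  # NIC callbacks for _ApplyContainerMods.  Unlike the disk callbacks, these
  # already run during CheckPrereq (on copies of the NIC objects) so that the
  # resulting change descriptions are available to the hooks; Exec then simply
  # installs the precomputed NIC list.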
  def _CreateNewNic(self, idx, params, private):
    """Creates data structure for a new network interface.

    """
    mac = params[constants.INIC_MAC]
    ip = params.get(constants.INIC_IP, None)
    net = params.get(constants.INIC_NETWORK, None)
    name = params.get(constants.INIC_NAME, None)
    net_uuid = self.cfg.LookupNetwork(net)
    #TODO: not private.filled?? can a nic have no nicparams??
    nicparams = private.filled
    nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name,
                       nicparams=nicparams)
    nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())

    return (nobj, [
      ("nic.%d" % idx,
       "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
       (mac, ip, private.filled[constants.NIC_MODE],
       private.filled[constants.NIC_LINK],
       net)),
      ])

  def _ApplyNicMods(self, idx, nic, params, private):
    """Modifies a network interface.

    """
    changes = []

    for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:
      if key in params:
        changes.append(("nic.%s/%d" % (key, idx), params[key]))
        setattr(nic, key, params[key])

    new_net = params.get(constants.INIC_NETWORK, nic.network)
    new_net_uuid = self.cfg.LookupNetwork(new_net)
    if new_net_uuid != nic.network:
      changes.append(("nic.network/%d" % idx, new_net))
      nic.network = new_net_uuid

    if private.filled:
      nic.nicparams = private.filled

      for (key, val) in nic.nicparams.items():
        changes.append(("nic.%s/%d" % (key, idx), val))

    return changes

  def Exec(self, feedback_fn):
    """Modifies an instance.

    All parameters take effect only at the next restart of the instance.

    """
    # Process here the warnings from CheckPrereq, as we don't have a
    # feedback_fn there.
    # TODO: Replace with self.LogWarning
    for warn in self.warn:
      feedback_fn("WARNING: %s" % warn)

    assert ((self.op.disk_template is None) ^
            bool(self.owned_locks(locking.LEVEL_NODE_RES))), \
      "Not owning any node resource locks"

    result = []

    # New primary node
    if self.op.pnode_uuid:
      self.instance.primary_node = self.op.pnode_uuid

    # runtime memory
    if self.op.runtime_mem:
      rpcres = self.rpc.call_instance_balloon_memory(self.instance.primary_node,
                                                     self.instance,
                                                     self.op.runtime_mem)
      rpcres.Raise("Cannot modify instance runtime memory")
      result.append(("runtime_memory", self.op.runtime_mem))

    # Apply disk changes
    _ApplyContainerMods("disk", self.instance.disks, result, self.diskmod,
                        self._CreateNewDisk, self._ModifyDisk,
                        self._RemoveDisk)
    _UpdateIvNames(0, self.instance.disks)

    if self.op.disk_template:
      if __debug__:
        check_nodes = set(self.instance.all_nodes)
        if self.op.remote_node_uuid:
          check_nodes.add(self.op.remote_node_uuid)
        for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
          owned = self.owned_locks(level)
          assert not (check_nodes - owned), \
            ("Not owning the correct locks, owning %r, expected at least %r" %
             (owned, check_nodes))

      r_shut = ShutdownInstanceDisks(self, self.instance)
      if not r_shut:
        raise errors.OpExecError("Cannot shutdown instance disks, unable to"
                                 " proceed with disk template conversion")
      mode = (self.instance.disk_template, self.op.disk_template)
      try:
        self._DISK_CONVERSIONS[mode](self, feedback_fn)
      except:
        self.cfg.ReleaseDRBDMinors(self.instance.uuid)
        raise
      result.append(("disk_template", self.op.disk_template))

      assert self.instance.disk_template == self.op.disk_template, \
        ("Expected disk template '%s', found '%s'" %
         (self.op.disk_template, self.instance.disk_template))

    # Release node and resource locks if there are any (they might already have
    # been released during disk conversion)
    ReleaseLocks(self, locking.LEVEL_NODE)
    ReleaseLocks(self, locking.LEVEL_NODE_RES)

    # Apply NIC changes
    if self._new_nics is not None:
      self.instance.nics = self._new_nics
      result.extend(self._nic_chgdesc)

    # hvparams changes
    if self.op.hvparams:
      self.instance.hvparams = self.hv_inst
      for key, val in self.op.hvparams.iteritems():
        result.append(("hv/%s" % key, val))

    # beparams changes
    if self.op.beparams:
      self.instance.beparams = self.be_inst
      for key, val in self.op.beparams.iteritems():
        result.append(("be/%s" % key, val))

    # OS change
    if self.op.os_name:
      self.instance.os = self.op.os_name

    # osparams changes
    if self.op.osparams:
      self.instance.osparams = self.os_inst
      for key, val in self.op.osparams.iteritems():
        result.append(("os/%s" % key, val))

    if self.op.offline is None:
      # Ignore
      pass
    elif self.op.offline:
      # Mark instance as offline
      self.cfg.MarkInstanceOffline(self.instance.uuid)
      result.append(("admin_state", constants.ADMINST_OFFLINE))
    else:
      # Mark instance as online, but stopped
      self.cfg.MarkInstanceDown(self.instance.uuid)
      result.append(("admin_state", constants.ADMINST_DOWN))

    self.cfg.Update(self.instance, feedback_fn, self.proc.GetECId())

    assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
                self.owned_locks(locking.LEVEL_NODE)), \
      "All node locks should have been released by now"

    return result

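  # Dispatch table used by Exec to select the conversion routine for the
  # (current template, requested template) pair; _PreCheckDiskTemplate
  # rejects any pair that is not listed here.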
  _DISK_CONVERSIONS = {
    (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
    (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
    }


class LUInstanceChangeGroup(LogicalUnit):
  """Moves an instance to another node group.

  """
  HPATH = "instance-change-group"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self.share_locks = ShareAll()

    self.needed_locks = {
      locking.LEVEL_NODEGROUP: [],
      locking.LEVEL_NODE: [],
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
      }

    self._ExpandAndLockInstance()

    if self.op.target_groups:
      self.req_target_uuids = map(self.cfg.LookupNodeGroup,
                                  self.op.target_groups)
    else:
      self.req_target_uuids = None

    self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]

      if self.req_target_uuids:
        lock_groups = set(self.req_target_uuids)

        # Lock all groups used by instance optimistically; this requires going
        # via the node before it's locked, requiring verification later on
        instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
        lock_groups.update(instance_groups)
      else:
        # No target groups, need to lock all of them
        lock_groups = locking.ALL_SET

      self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups

    elif level == locking.LEVEL_NODE:
      if self.req_target_uuids:
        # Lock all nodes used by instances
        self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
        self._LockInstancesNodes()

        # Lock all nodes in all potential target groups
        lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
                       self.cfg.GetInstanceNodeGroups(self.op.instance_uuid))
        member_nodes = [node_uuid
                        for group in lock_groups
                        for node_uuid in self.cfg.GetNodeGroup(group).members]
        self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
      else:
        # Lock all nodes as all groups are potential targets
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    owned_instance_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))

    assert (self.req_target_uuids is None or
            owned_groups.issuperset(self.req_target_uuids))
    assert owned_instance_names == set([self.op.instance_name])

    # Get instance information
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)

    # Check if node groups for locked instance are still correct
    assert owned_nodes.issuperset(self.instance.all_nodes), \
      ("Instance %s's nodes changed while we kept the lock" %
       self.op.instance_name)

    inst_groups = CheckInstanceNodeGroups(self.cfg, self.op.instance_uuid,
                                          owned_groups)

    if self.req_target_uuids:
      # User requested specific target groups
      self.target_uuids = frozenset(self.req_target_uuids)
    else:
      # All groups except those used by the instance are potential targets
      self.target_uuids = owned_groups - inst_groups

    conflicting_groups = self.target_uuids & inst_groups
    if conflicting_groups:
      raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
                                 " used by the instance '%s'" %
                                 (utils.CommaJoin(conflicting_groups),
                                  self.op.instance_name),
                                 errors.ECODE_INVAL)

    if not self.target_uuids:
      raise errors.OpPrereqError("There are no possible target groups",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    assert self.target_uuids

    env = {
      "TARGET_GROUPS": " ".join(self.target_uuids),
      }

    env.update(BuildInstanceHookEnvByObject(self, self.instance))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()
    return ([mn], [mn])

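  # Exec does not move anything itself: it asks the iallocator for a
  # change-group solution and returns the resulting jobs, which the job
  # queue then executes as separate opcodes.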
  def Exec(self, feedback_fn):
    instances = list(self.owned_locks(locking.LEVEL_INSTANCE))

    assert instances == [self.op.instance_name], "Instance not locked"

    req = iallocator.IAReqGroupChange(instances=instances,
                                      target_groups=list(self.target_uuids))
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute solution for changing group of"
                                 " instance '%s' using iallocator '%s': %s" %
                                 (self.op.instance_name, self.op.iallocator,
                                  ial.info), errors.ECODE_NORES)

    jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False)

    self.LogInfo("Iallocator returned %s job(s) for changing group of"
                 " instance '%s'", len(jobs), self.op.instance_name)

    return ResultWithJobs(jobs)