#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Logical units dealing with instances."""

import OpenSSL
import copy
import logging
import os

from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import ht
from ganeti import hypervisor
from ganeti import locking
from ganeti.masterd import iallocator
from ganeti import masterd
from ganeti import netutils
from ganeti import objects
from ganeti import opcodes
from ganeti import pathutils
from ganeti import rpc
from ganeti import utils

from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs

from ganeti.cmdlib.common import INSTANCE_DOWN, \
  INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, CheckNodeOnline, \
  ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
  LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
  IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
  AnnotateDiskParams, GetUpdatedParams, ExpandInstanceUuidAndName, \
  ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeUuidAndName, \
  CheckDiskTemplateEnabled
from ganeti.cmdlib.instance_storage import CreateDisks, \
  CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, WaitForSync, \
  IsExclusiveStorageEnabledNodeUuid, CreateSingleBlockDev, ComputeDisks, \
  CheckRADOSFreeSpace, ComputeDiskSizePerVG, GenerateDiskTemplate, \
  StartInstanceDisks, ShutdownInstanceDisks, AssembleInstanceDisks, \
  CheckSpindlesExclusiveStorage
from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
  GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
  NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
  ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
  GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
  CheckInstanceBridgesExist, CheckNicsBridgesExist, CheckNodeHasOS

import ganeti.masterd.instance


#: Type description for changes as returned by L{_ApplyContainerMods}'s
#: callbacks
_TApplyContModsCbChanges = \
  ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
    ht.TNonEmptyString,
    ht.TAny,
    ])))
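# A value matching this description is either None or a list of pairs, e.g.
# [("nic/0", "details")]; the check only requires a non-empty string followed
# by an arbitrary value (the sample entry above is illustrative, not a value
# produced by this module).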


def _CheckHostnameSane(lu, name):
  """Ensures that a given hostname resolves to a 'sane' name.

  The given name is required to be a prefix of the resolved hostname,
  to prevent accidental mismatches.

  @param lu: the logical unit on behalf of which we're checking
  @param name: the name we should resolve and check
  @return: the resolved hostname object

  """
  hostname = netutils.GetHostname(name=name)
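  # Illustrative example: a requested name "inst1" that resolves to
  # "inst1.example.com" passes the prefix check below, while a name that
  # resolves to an unrelated FQDN is rejected.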
  if hostname.name != name:
    lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name)
  if not utils.MatchNameComponent(name, [hostname.name]):
    raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
                                " same as given hostname '%s'") %
                               (hostname.name, name), errors.ECODE_INVAL)
  return hostname


def _CheckOpportunisticLocking(op):
  """Generate error if opportunistic locking is not possible.

  """
  if op.opportunistic_locking and not op.iallocator:
    raise errors.OpPrereqError("Opportunistic locking is only available in"
                               " combination with an instance allocator",
                               errors.ECODE_INVAL)


def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_name_whitelist):
  """Wrapper around IAReqInstanceAlloc.

  @param op: The instance opcode
  @param disks: The computed disks
  @param nics: The computed nics
  @param beparams: The fully filled beparams
  @param node_name_whitelist: List of nodes which should appear as online to the
    allocator (unless the node is already marked offline)

  @returns: A filled L{iallocator.IAReqInstanceAlloc}

  """
  spindle_use = beparams[constants.BE_SPINDLE_USE]
  return iallocator.IAReqInstanceAlloc(name=op.instance_name,
                                       disk_template=op.disk_template,
                                       tags=op.tags,
                                       os=op.os_type,
                                       vcpus=beparams[constants.BE_VCPUS],
                                       memory=beparams[constants.BE_MAXMEM],
                                       spindle_use=spindle_use,
                                       disks=disks,
                                       nics=[n.ToDict() for n in nics],
                                       hypervisor=op.hypervisor,
                                       node_whitelist=node_name_whitelist)


def _ComputeFullBeParams(op, cluster):
  """Computes the full beparams.

  @param op: The instance opcode
  @param cluster: The cluster config object

  @return: The fully filled beparams

  """
  default_beparams = cluster.beparams[constants.PP_DEFAULT]
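  # Any parameter passed as constants.VALUE_AUTO is replaced by the cluster
  # default below, before the dict is type-checked and filled via SimpleFillBE.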
  for param, value in op.beparams.iteritems():
    if value == constants.VALUE_AUTO:
      op.beparams[param] = default_beparams[param]
  objects.UpgradeBeParams(op.beparams)
  utils.ForceDictType(op.beparams, constants.BES_PARAMETER_TYPES)
  return cluster.SimpleFillBE(op.beparams)


def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
  """Computes the nics.

  @param op: The instance opcode
  @param cluster: Cluster configuration object
  @param default_ip: The default ip to assign
  @param cfg: An instance of the configuration object
  @param ec_id: Execution context ID

  @returns: The built-up NICs

  """
  nics = []
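  # Each entry of op.nics is a dict keyed by the constants.INIC_* names; an
  # illustrative (not prescriptive) example would be
  # {constants.INIC_MODE: constants.VALUE_AUTO, constants.INIC_IP: "pool"}.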
  for nic in op.nics:
    nic_mode_req = nic.get(constants.INIC_MODE, None)
    nic_mode = nic_mode_req
    if nic_mode is None or nic_mode == constants.VALUE_AUTO:
      nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]

    net = nic.get(constants.INIC_NETWORK, None)
    link = nic.get(constants.NIC_LINK, None)
    ip = nic.get(constants.INIC_IP, None)

    if net is None or net.lower() == constants.VALUE_NONE:
      net = None
    else:
      if nic_mode_req is not None or link is not None:
        raise errors.OpPrereqError("If network is given, no mode or link"
                                   " is allowed to be passed",
                                   errors.ECODE_INVAL)

    # ip validity checks
    if ip is None or ip.lower() == constants.VALUE_NONE:
      nic_ip = None
    elif ip.lower() == constants.VALUE_AUTO:
      if not op.name_check:
        raise errors.OpPrereqError("IP address set to auto but name checks"
                                   " have been skipped",
                                   errors.ECODE_INVAL)
      nic_ip = default_ip
    else:
      # We defer pool operations until later, so that the iallocator has
      # filled in the instance's node(s)
      if ip.lower() == constants.NIC_IP_POOL:
        if net is None:
          raise errors.OpPrereqError("if ip=pool, parameter network"
                                     " must be passed too",
                                     errors.ECODE_INVAL)

      elif not netutils.IPAddress.IsValid(ip):
        raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                   errors.ECODE_INVAL)

      nic_ip = ip

    # TODO: check the ip address for uniqueness
    if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
      raise errors.OpPrereqError("Routed nic mode requires an ip address",
                                 errors.ECODE_INVAL)

    # MAC address verification
    mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
    if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
      mac = utils.NormalizeAndValidateMac(mac)

      try:
        # TODO: We need to factor this out
        cfg.ReserveMAC(mac, ec_id)
      except errors.ReservationError:
        raise errors.OpPrereqError("MAC address %s already in use"
                                   " in cluster" % mac,
                                   errors.ECODE_NOTUNIQUE)

    # Build nic parameters
    nicparams = {}
    if nic_mode_req:
      nicparams[constants.NIC_MODE] = nic_mode
    if link:
      nicparams[constants.NIC_LINK] = link

    check_params = cluster.SimpleFillNIC(nicparams)
    objects.NIC.CheckParameterSyntax(check_params)
    net_uuid = cfg.LookupNetwork(net)
    name = nic.get(constants.INIC_NAME, None)
    if name is not None and name.lower() == constants.VALUE_NONE:
      name = None
    nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name,
                          network=net_uuid, nicparams=nicparams)
    nic_obj.uuid = cfg.GenerateUniqueID(ec_id)
    nics.append(nic_obj)

  return nics


def _CheckForConflictingIp(lu, ip, node_uuid):
  """In case of conflicting IP address raise error.

  @type ip: string
  @param ip: IP address
  @type node_uuid: string
  @param node_uuid: node UUID

  """
  (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node_uuid)
  if conf_net is not None:
    raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
                                " network %s, but the target NIC does not." %
                                (ip, conf_net)),
                               errors.ECODE_STATE)

  return (None, None)


def _ComputeIPolicyInstanceSpecViolation(
  ipolicy, instance_spec, disk_template,
  _compute_fn=ComputeIPolicySpecViolation):
  """Compute whether the instance specs meet the specs of the ipolicy.

  @type ipolicy: dict
  @param ipolicy: The ipolicy to verify against
  @type instance_spec: dict
  @param instance_spec: The instance spec to verify
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{ComputeIPolicySpecViolation}

  """
  mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
  cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
  disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
  disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
  nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
  spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)

  return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
                     disk_sizes, spindle_use, disk_template)


def _CheckOSVariant(os_obj, name):
  """Check whether an OS name conforms to the os variants specification.

  @type os_obj: L{objects.OS}
  @param os_obj: OS object to check
  @type name: string
  @param name: OS name passed by the user, to check for validity

  """
  variant = objects.OS.GetVariant(name)
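  # OS variants are encoded in the name itself, e.g. "debootstrap+default";
  # GetVariant() returns the part after the "+" (the name used here is only
  # an example).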
  if not os_obj.supported_variants:
    if variant:
      raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
                                 " passed)" % (os_obj.name, variant),
                                 errors.ECODE_INVAL)
    return
  if not variant:
    raise errors.OpPrereqError("OS name must include a variant",
                               errors.ECODE_INVAL)

  if variant not in os_obj.supported_variants:
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)


class LUInstanceCreate(LogicalUnit):
  """Create an instance.

  """
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False
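  # This LU backs the instance-creation opcode (what "gnt-instance add"
  # submits); HPATH/HTYPE above select the hook scripts that run around it.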

  def _CheckDiskTemplateValid(self):
    """Checks validity of disk template.

    """
    cluster = self.cfg.GetClusterInfo()
    if self.op.disk_template is None:
      # FIXME: It would be better to take the default disk template from the
      # ipolicy, but for the ipolicy we need the primary node, which we get from
      # the iallocator, which wants the disk template as input. To solve this
      # chicken-and-egg problem, it should be possible to specify just a node
      # group from the iallocator and take the ipolicy from that.
      self.op.disk_template = cluster.enabled_disk_templates[0]
    CheckDiskTemplateEnabled(cluster, self.op.disk_template)

  def _CheckDiskArguments(self):
    """Checks validity of disk-related arguments.

    """
    # check that disk names are unique and valid
    utils.ValidateDeviceNames("disk", self.op.disks)

    self._CheckDiskTemplateValid()

    # check disks: parameter names and consistent adopt/no-adopt strategy
    has_adopt = has_no_adopt = False
    for disk in self.op.disks:
      if self.op.disk_template != constants.DT_EXT:
        utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
      if constants.IDISK_ADOPT in disk:
        has_adopt = True
      else:
        has_no_adopt = True
    if has_adopt and has_no_adopt:
      raise errors.OpPrereqError("Either all disks are adopted or none is",
                                 errors.ECODE_INVAL)
    if has_adopt:
      if self.op.disk_template not in constants.DTS_MAY_ADOPT:
        raise errors.OpPrereqError("Disk adoption is not supported for the"
                                   " '%s' disk template" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)
      if self.op.iallocator is not None:
        raise errors.OpPrereqError("Disk adoption not allowed with an"
                                   " iallocator script", errors.ECODE_INVAL)
      if self.op.mode == constants.INSTANCE_IMPORT:
        raise errors.OpPrereqError("Disk adoption not allowed for"
                                   " instance import", errors.ECODE_INVAL)
    else:
      if self.op.disk_template in constants.DTS_MUST_ADOPT:
        raise errors.OpPrereqError("Disk template %s requires disk adoption,"
                                   " but no 'adopt' parameter given" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)

    self.adopt_disks = has_adopt

  def CheckArguments(self):
    """Check arguments.

    """
    # do not require name_check to ease forward/backward compatibility
    # for tools
    if self.op.no_install and self.op.start:
      self.LogInfo("No-installation mode selected, disabling startup")
      self.op.start = False
    # validate/normalize the instance name
    self.op.instance_name = \
      netutils.Hostname.GetNormalizedName(self.op.instance_name)

    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("Cannot do IP address check without a name"
                                 " check", errors.ECODE_INVAL)

    # check nics' parameter names
    for nic in self.op.nics:
      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
    # check that NIC names are unique and valid
    utils.ValidateDeviceNames("NIC", self.op.nics)

    self._CheckDiskArguments()

    # instance name verification
    if self.op.name_check:
      self.hostname = _CheckHostnameSane(self, self.op.instance_name)
      self.op.instance_name = self.hostname.name
      # used in CheckPrereq for ip ping check
      self.check_ip = self.hostname.ip
    else:
      self.check_ip = None

    # file storage checks
    if (self.op.file_driver and
        not self.op.file_driver in constants.FILE_DRIVER):
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                 self.op.file_driver, errors.ECODE_INVAL)

    # set default file_driver if unset and required
    if (not self.op.file_driver and
        self.op.disk_template in [constants.DT_FILE,
                                  constants.DT_SHARED_FILE]):
      self.op.file_driver = constants.FD_LOOP

    ### Node/iallocator related checks
    CheckIAllocatorOrNode(self, "iallocator", "pnode")

    if self.op.pnode is not None:
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        if self.op.snode is None:
          raise errors.OpPrereqError("The networked disk templates need"
                                     " a mirror node", errors.ECODE_INVAL)
      elif self.op.snode:
        self.LogWarning("Secondary node will be ignored on non-mirrored disk"
                        " template")
        self.op.snode = None

    _CheckOpportunisticLocking(self.op)

    self._cds = GetClusterDomainSecret()

    if self.op.mode == constants.INSTANCE_IMPORT:
      # On import force_variant must be True, because if we forced it at
      # initial install, our only chance when importing it back is that it
      # works again!
      self.op.force_variant = True

      if self.op.no_install:
        self.LogInfo("No-installation mode has no effect during import")

    elif self.op.mode == constants.INSTANCE_CREATE:
      if self.op.os_type is None:
        raise errors.OpPrereqError("No guest OS specified",
                                   errors.ECODE_INVAL)
      if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
        raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
                                   " installation" % self.op.os_type,
                                   errors.ECODE_STATE)
      if self.op.disk_template is None:
        raise errors.OpPrereqError("No disk template specified",
                                   errors.ECODE_INVAL)

    elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
      # Check handshake to ensure both clusters have the same domain secret
      src_handshake = self.op.source_handshake
      if not src_handshake:
        raise errors.OpPrereqError("Missing source handshake",
                                   errors.ECODE_INVAL)

      errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
                                                           src_handshake)
      if errmsg:
        raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
                                   errors.ECODE_INVAL)

      # Load and check source CA
      self.source_x509_ca_pem = self.op.source_x509_ca
      if not self.source_x509_ca_pem:
        raise errors.OpPrereqError("Missing source X509 CA",
                                   errors.ECODE_INVAL)

      try:
        (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
                                                    self._cds)
      except OpenSSL.crypto.Error, err:
        raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
                                   (err, ), errors.ECODE_INVAL)

      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
      if errcode is not None:
        raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
                                   errors.ECODE_INVAL)

      self.source_x509_ca = cert

      src_instance_name = self.op.source_instance_name
      if not src_instance_name:
        raise errors.OpPrereqError("Missing source instance name",
                                   errors.ECODE_INVAL)

      self.source_instance_name = \
        netutils.GetHostname(name=src_instance_name).name

    else:
      raise errors.OpPrereqError("Invalid instance creation mode %r" %
                                 self.op.mode, errors.ECODE_INVAL)

  def ExpandNames(self):
513
    """ExpandNames for CreateInstance.
514

515
    Figure out the right locks for instance creation.
516

517
    """
518
    self.needed_locks = {}
519

    
520
    # this is just a preventive check, but someone might still add this
521
    # instance in the meantime, and creation will fail at lock-add time
522
    if self.op.instance_name in\
523
      [inst.name for inst in self.cfg.GetAllInstancesInfo().values()]:
524
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
525
                                 self.op.instance_name, errors.ECODE_EXISTS)
526

    
527
    self.add_locks[locking.LEVEL_INSTANCE] = self.op.instance_name
528

    
529
    if self.op.iallocator:
530
      # TODO: Find a solution to not lock all nodes in the cluster, e.g. by
531
      # specifying a group on instance creation and then selecting nodes from
532
      # that group
533
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
534
      self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
535

    
536
      if self.op.opportunistic_locking:
537
        self.opportunistic_locks[locking.LEVEL_NODE] = True
538
        self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
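        # With opportunistic locking the node locks are acquired on a
        # best-effort basis; _RunAllocator later restricts the allocator to
        # the nodes whose locks were actually obtained.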
    else:
      (self.op.pnode_uuid, self.op.pnode) = \
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
      nodelist = [self.op.pnode_uuid]
      if self.op.snode is not None:
        (self.op.snode_uuid, self.op.snode) = \
          ExpandNodeUuidAndName(self.cfg, self.op.snode_uuid, self.op.snode)
        nodelist.append(self.op.snode_uuid)
      self.needed_locks[locking.LEVEL_NODE] = nodelist

    # in case of import lock the source node too
    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = self.op.src_node
      src_path = self.op.src_path

      if src_path is None:
        self.op.src_path = src_path = self.op.instance_name

      if src_node is None:
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
        self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
        self.op.src_node = None
        if os.path.isabs(src_path):
          raise errors.OpPrereqError("Importing an instance from a path"
                                     " requires a source node option",
                                     errors.ECODE_INVAL)
      else:
        (self.op.src_node_uuid, self.op.src_node) = (_, src_node) = \
          ExpandNodeUuidAndName(self.cfg, self.op.src_node_uuid, src_node)
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
          self.needed_locks[locking.LEVEL_NODE].append(self.op.src_node_uuid)
        if not os.path.isabs(src_path):
          self.op.src_path = \
            utils.PathJoin(pathutils.EXPORT_DIR, src_path)

    self.needed_locks[locking.LEVEL_NODE_RES] = \
      CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def _RunAllocator(self):
    """Run the allocator based on input opcode.

    """
    if self.op.opportunistic_locking:
      # Only consider nodes for which a lock is held
      node_name_whitelist = self.cfg.GetNodeNames(
        self.owned_locks(locking.LEVEL_NODE))
    else:
      node_name_whitelist = None

    #TODO Export network to iallocator so that it chooses a pnode
    #     in a nodegroup that has the desired network connected to
    req = _CreateInstanceAllocRequest(self.op, self.disks,
                                      self.nics, self.be_full,
                                      node_name_whitelist)
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)
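    # On success, ial.result contains the chosen node names (one node, or two
    # for mirrored disk templates); they are converted back to UUIDs below.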

    if not ial.success:
      # When opportunistic locks are used only a temporary failure is generated
      if self.op.opportunistic_locking:
        ecode = errors.ECODE_TEMP_NORES
      else:
        ecode = errors.ECODE_NORES

      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" %
                                 (self.op.iallocator, ial.info),
                                 ecode)

    (self.op.pnode_uuid, self.op.pnode) = \
      ExpandNodeUuidAndName(self.cfg, None, ial.result[0])
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                 self.op.instance_name, self.op.iallocator,
                 utils.CommaJoin(ial.result))

    assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator"

    if req.RequiredNodes() == 2:
      (self.op.snode_uuid, self.op.snode) = \
        ExpandNodeUuidAndName(self.cfg, None, ial.result[1])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "ADD_MODE": self.op.mode,
      }
    if self.op.mode == constants.INSTANCE_IMPORT:
      env["SRC_NODE"] = self.op.src_node
      env["SRC_PATH"] = self.op.src_path
      env["SRC_IMAGES"] = self.src_images

    env.update(BuildInstanceHookEnv(
      name=self.op.instance_name,
      primary_node_name=self.op.pnode,
      secondary_node_names=self.cfg.GetNodeNames(self.secondaries),
      status=self.op.start,
      os_type=self.op.os_type,
      minmem=self.be_full[constants.BE_MINMEM],
      maxmem=self.be_full[constants.BE_MAXMEM],
      vcpus=self.be_full[constants.BE_VCPUS],
      nics=NICListToTuple(self, self.nics),
      disk_template=self.op.disk_template,
      disks=[(d[constants.IDISK_NAME], d.get("uuid", ""),
              d[constants.IDISK_SIZE], d[constants.IDISK_MODE])
             for d in self.disks],
      bep=self.be_full,
      hvp=self.hv_full,
      hypervisor_name=self.op.hypervisor,
      tags=self.op.tags,
      ))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode(), self.op.pnode_uuid] + self.secondaries
    return nl, nl

  def _ReadExportInfo(self):
    """Reads the export information from disk.

    It will override the opcode source node and path with the actual
    information, if these two were not specified before.

    @return: the export information

    """
    assert self.op.mode == constants.INSTANCE_IMPORT

    if self.op.src_node_uuid is None:
      locked_nodes = self.owned_locks(locking.LEVEL_NODE)
      exp_list = self.rpc.call_export_list(locked_nodes)
      found = False
      for node_uuid in exp_list:
        if exp_list[node_uuid].fail_msg:
          continue
        if self.op.src_path in exp_list[node_uuid].payload:
          found = True
          self.op.src_node = self.cfg.GetNodeInfo(node_uuid).name
          self.op.src_node_uuid = node_uuid
          self.op.src_path = utils.PathJoin(pathutils.EXPORT_DIR,
                                            self.op.src_path)
          break
      if not found:
        raise errors.OpPrereqError("No export found for relative path %s" %
                                   self.op.src_path, errors.ECODE_INVAL)

    CheckNodeOnline(self, self.op.src_node_uuid)
    result = self.rpc.call_export_info(self.op.src_node_uuid, self.op.src_path)
    result.Raise("No export or invalid export found in dir %s" %
                 self.op.src_path)

    export_info = objects.SerializableConfigParser.Loads(str(result.payload))
    if not export_info.has_section(constants.INISECT_EXP):
      raise errors.ProgrammerError("Corrupted export config",
                                   errors.ECODE_ENVIRON)

    ei_version = export_info.get(constants.INISECT_EXP, "version")
    if int(ei_version) != constants.EXPORT_VERSION:
      raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                 (ei_version, constants.EXPORT_VERSION),
                                 errors.ECODE_ENVIRON)
    return export_info

  def _ReadExportParams(self, einfo):
    """Use export parameters as defaults.

    If the opcode doesn't override some instance parameters, try to
    take them from the export information, if it declares them.

    """
    self.op.os_type = einfo.get(constants.INISECT_EXP, "os")
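    # Anything not given in the opcode is filled in from the export below:
    # disk sizes, NICs, tags, the hypervisor, and the hypervisor/backend/OS
    # parameter sections, when the export provides them.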

    if not self.op.disks:
      disks = []
      # TODO: import the disk iv_name too
      for idx in range(constants.MAX_DISKS):
        if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
          disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
          disks.append({constants.IDISK_SIZE: disk_sz})
      self.op.disks = disks
      if not disks and self.op.disk_template != constants.DT_DISKLESS:
        raise errors.OpPrereqError("No disk info specified and the export"
                                   " is missing the disk information",
                                   errors.ECODE_INVAL)

    if not self.op.nics:
      nics = []
      for idx in range(constants.MAX_NICS):
        if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
          ndict = {}
          for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
            nic_param_name = "nic%d_%s" % (idx, name)
            if einfo.has_option(constants.INISECT_INS, nic_param_name):
              v = einfo.get(constants.INISECT_INS, nic_param_name)
              ndict[name] = v
          nics.append(ndict)
        else:
          break
      self.op.nics = nics

    if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
      self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()

    if (self.op.hypervisor is None and
        einfo.has_option(constants.INISECT_INS, "hypervisor")):
      self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")

    if einfo.has_section(constants.INISECT_HYP):
      # use the export parameters but do not override the ones
      # specified by the user
      for name, value in einfo.items(constants.INISECT_HYP):
        if name not in self.op.hvparams:
          self.op.hvparams[name] = value

    if einfo.has_section(constants.INISECT_BEP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_BEP):
        if name not in self.op.beparams:
          self.op.beparams[name] = value
        # Compatibility for the old "memory" be param
        if name == constants.BE_MEMORY:
          if constants.BE_MAXMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MAXMEM] = value
          if constants.BE_MINMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MINMEM] = value
    else:
      # try to read the parameters old style, from the main section
      for name in constants.BES_PARAMETERS:
        if (name not in self.op.beparams and
            einfo.has_option(constants.INISECT_INS, name)):
          self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)

    if einfo.has_section(constants.INISECT_OSP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_OSP):
        if name not in self.op.osparams:
          self.op.osparams[name] = value

  def _RevertToDefaults(self, cluster):
    """Revert the instance parameters to the default values.

    """
    # hvparams
    hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
    for name in self.op.hvparams.keys():
      if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
        del self.op.hvparams[name]
    # beparams
    be_defs = cluster.SimpleFillBE({})
    for name in self.op.beparams.keys():
      if name in be_defs and be_defs[name] == self.op.beparams[name]:
        del self.op.beparams[name]
    # nic params
    nic_defs = cluster.SimpleFillNIC({})
    for nic in self.op.nics:
      for name in constants.NICS_PARAMETERS:
        if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
          del nic[name]
    # osparams
    os_defs = cluster.SimpleFillOS(self.op.os_type, {})
    for name in self.op.osparams.keys():
      if name in os_defs and os_defs[name] == self.op.osparams[name]:
        del self.op.osparams[name]

  def _CalculateFileStorageDir(self):
    """Calculate final instance file storage dir.

    """
    # file storage dir calculation/check
    self.instance_file_storage_dir = None
    if self.op.disk_template in constants.DTS_FILEBASED:
      # build the full file storage dir path
      joinargs = []

      if self.op.disk_template == constants.DT_SHARED_FILE:
        get_fsd_fn = self.cfg.GetSharedFileStorageDir
      else:
        get_fsd_fn = self.cfg.GetFileStorageDir

      cfg_storagedir = get_fsd_fn()
      if not cfg_storagedir:
        raise errors.OpPrereqError("Cluster file storage dir not defined",
                                   errors.ECODE_STATE)
      joinargs.append(cfg_storagedir)

      if self.op.file_storage_dir is not None:
        joinargs.append(self.op.file_storage_dir)

      joinargs.append(self.op.instance_name)

      # pylint: disable=W0142
      self.instance_file_storage_dir = utils.PathJoin(*joinargs)

  def CheckPrereq(self): # pylint: disable=R0914
    """Check prerequisites.

    """
    self._CalculateFileStorageDir()

    if self.op.mode == constants.INSTANCE_IMPORT:
      export_info = self._ReadExportInfo()
      self._ReadExportParams(export_info)
      self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
    else:
      self._old_instance_name = None

    if (not self.cfg.GetVGName() and
        self.op.disk_template not in constants.DTS_NOT_LVM):
      raise errors.OpPrereqError("Cluster does not support lvm-based"
                                 " instances", errors.ECODE_STATE)

    if (self.op.hypervisor is None or
        self.op.hypervisor == constants.VALUE_AUTO):
      self.op.hypervisor = self.cfg.GetHypervisorType()

    cluster = self.cfg.GetClusterInfo()
    enabled_hvs = cluster.enabled_hypervisors
    if self.op.hypervisor not in enabled_hvs:
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
                                 " cluster (%s)" %
                                 (self.op.hypervisor, ",".join(enabled_hvs)),
                                 errors.ECODE_STATE)

    # Check tag validity
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)

    # check hypervisor parameter syntax (locally)
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
    filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
                                      self.op.hvparams)
    hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
    hv_type.CheckParameterSyntax(filled_hvp)
    self.hv_full = filled_hvp
    # check that we don't specify global parameters on an instance
    CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
                         "instance", "cluster")

    # fill and remember the beparams dict
    self.be_full = _ComputeFullBeParams(self.op, cluster)

    # build os parameters
    self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)

    # now that hvp/bep are in final format, let's reset to defaults,
    # if told to do so
    if self.op.identify_defaults:
      self._RevertToDefaults(cluster)

    # NIC buildup
    self.nics = _ComputeNics(self.op, cluster, self.check_ip, self.cfg,
                             self.proc.GetECId())

    # disk checks/pre-build
    default_vg = self.cfg.GetVGName()
    self.disks = ComputeDisks(self.op, default_vg)

    if self.op.mode == constants.INSTANCE_IMPORT:
      disk_images = []
      for idx in range(len(self.disks)):
        option = "disk%d_dump" % idx
        if export_info.has_option(constants.INISECT_INS, option):
          # FIXME: are the old OSes, disk sizes, etc. useful?
          export_name = export_info.get(constants.INISECT_INS, option)
          image = utils.PathJoin(self.op.src_path, export_name)
          disk_images.append(image)
        else:
          disk_images.append(False)

      self.src_images = disk_images

      if self.op.instance_name == self._old_instance_name:
        for idx, nic in enumerate(self.nics):
          if nic.mac == constants.VALUE_AUTO:
            nic_mac_ini = "nic%d_mac" % idx
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)

    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT

    # ip ping checks (we use the same ip that was resolved in ExpandNames)
    if self.op.ip_check:
      if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (self.check_ip, self.op.instance_name),
                                   errors.ECODE_NOTUNIQUE)

    #### mac address generation
    # By generating here the mac address both the allocator and the hooks get
    # the real final mac address rather than the 'auto' or 'generate' value.
    # There is a race condition between the generation and the instance object
    # creation, which means that we know the mac is valid now, but we're not
    # sure it will be when we actually add the instance. If things go bad
    # adding the instance will abort because of a duplicate mac, and the
    # creation job will fail.
    for nic in self.nics:
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId())

    #### allocator run

    if self.op.iallocator is not None:
      self._RunAllocator()

    # Release all unneeded node locks
    keep_locks = filter(None, [self.op.pnode_uuid, self.op.snode_uuid,
                               self.op.src_node_uuid])
    ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES)), \
      "Node locks differ from node resource locks"

    #### node related checks

    # check primary node
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode_uuid)
    assert self.pnode is not None, \
      "Cannot retrieve locked node %s" % self.op.pnode_uuid
    if pnode.offline:
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if pnode.drained:
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if not pnode.vm_capable:
      raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
                                 " '%s'" % pnode.name, errors.ECODE_STATE)

    self.secondaries = []

    # Fill in any IPs from IP pools. This must happen here, because we need to
    # know the nic's primary node, as specified by the iallocator
    for idx, nic in enumerate(self.nics):
      net_uuid = nic.network
      if net_uuid is not None:
        nobj = self.cfg.GetNetwork(net_uuid)
        netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.uuid)
        if netparams is None:
          raise errors.OpPrereqError("No netparams found for network"
                                     " %s. Probably not connected to"
                                     " node's %s nodegroup" %
                                     (nobj.name, self.pnode.name),
                                     errors.ECODE_INVAL)
        self.LogInfo("NIC/%d inherits netparams %s" %
                     (idx, netparams.values()))
        nic.nicparams = dict(netparams)
        if nic.ip is not None:
          if nic.ip.lower() == constants.NIC_IP_POOL:
            try:
              nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
                                         " from the address pool" % idx,
                                         errors.ECODE_STATE)
            self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name)
          else:
            try:
              self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("IP address %s already in use"
                                         " or does not belong to network %s" %
                                         (nic.ip, nobj.name),
                                         errors.ECODE_NOTUNIQUE)

      # net is None, ip None or given
      elif self.op.conflicts_check:
        _CheckForConflictingIp(self, nic.ip, self.pnode.uuid)

    # mirror node verification
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.snode_uuid == pnode.uuid:
        raise errors.OpPrereqError("The secondary node cannot be the"
                                   " primary node", errors.ECODE_INVAL)
      CheckNodeOnline(self, self.op.snode_uuid)
      CheckNodeNotDrained(self, self.op.snode_uuid)
      CheckNodeVmCapable(self, self.op.snode_uuid)
      self.secondaries.append(self.op.snode_uuid)

      snode = self.cfg.GetNodeInfo(self.op.snode_uuid)
      if pnode.group != snode.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    nodes = [pnode]
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      nodes.append(snode)
    has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
    excl_stor = compat.any(map(has_es, nodes))
    if excl_stor and not self.op.disk_template in constants.DTS_EXCL_STORAGE:
      raise errors.OpPrereqError("Disk template %s not supported with"
                                 " exclusive storage" % self.op.disk_template,
                                 errors.ECODE_STATE)
    for disk in self.disks:
      CheckSpindlesExclusiveStorage(disk, excl_stor, True)

    node_uuids = [pnode.uuid] + self.secondaries

    if not self.adopt_disks:
      if self.op.disk_template == constants.DT_RBD:
        # _CheckRADOSFreeSpace() is just a placeholder.
        # Any function that checks prerequisites can be placed here.
        # Check if there is enough space on the RADOS cluster.
        CheckRADOSFreeSpace()
      elif self.op.disk_template == constants.DT_EXT:
        # FIXME: Function that checks prereqs if needed
        pass
      elif self.op.disk_template in utils.GetLvmDiskTemplates():
        # Check lv size requirements, if not adopting
        req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
        CheckNodesFreeDiskPerVG(self, node_uuids, req_sizes)
      else:
        # FIXME: add checks for other, non-adopting, non-lvm disk templates
        pass

    elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
      all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
                                disk[constants.IDISK_ADOPT])
                     for disk in self.disks])
      if len(all_lvs) != len(self.disks):
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
                                   errors.ECODE_INVAL)
      for lv_name in all_lvs:
        try:
          # FIXME: lv_name here is "vg/lv" need to ensure that other calls
          # to ReserveLV uses the same syntax
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("LV named %s used by another instance" %
                                     lv_name, errors.ECODE_NOTUNIQUE)

      vg_names = self.rpc.call_vg_list([pnode.uuid])[pnode.uuid]
      vg_names.Raise("Cannot get VG information from node %s" % pnode.name)

      node_lvs = self.rpc.call_lv_list([pnode.uuid],
                                       vg_names.payload.keys())[pnode.uuid]
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
      node_lvs = node_lvs.payload

      delta = all_lvs.difference(node_lvs.keys())
      if delta:
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
      if online_lvs:
        raise errors.OpPrereqError("Online logical volumes found, cannot"
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
                                   errors.ECODE_STATE)
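      # node_lvs maps "vg/lv" names to attribute tuples; element 0 is the
      # volume size used below and element 2 is the in-use flag checked above.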
      # update the size of disk based on what is found
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
                                        dsk[constants.IDISK_ADOPT])][0]))

    elif self.op.disk_template == constants.DT_BLOCK:
      # Normalize and de-duplicate device paths
      all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
                       for disk in self.disks])
      if len(all_disks) != len(self.disks):
        raise errors.OpPrereqError("Duplicate disk names given for adoption",
                                   errors.ECODE_INVAL)
      baddisks = [d for d in all_disks
                  if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
      if baddisks:
        raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
                                   " cannot be adopted" %
                                   (utils.CommaJoin(baddisks),
                                    constants.ADOPTABLE_BLOCKDEV_ROOT),
                                   errors.ECODE_INVAL)

      node_disks = self.rpc.call_bdev_sizes([pnode.uuid],
                                            list(all_disks))[pnode.uuid]
      node_disks.Raise("Cannot get block device information from node %s" %
                       pnode.name)
      node_disks = node_disks.payload
      delta = all_disks.difference(node_disks.keys())
      if delta:
        raise errors.OpPrereqError("Missing block device(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_disks[dsk[constants.IDISK_ADOPT]]))

    # Verify instance specs
    spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
    ispec = {
      constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
      constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
      constants.ISPEC_DISK_COUNT: len(self.disks),
      constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE]
                                  for disk in self.disks],
      constants.ISPEC_NIC_COUNT: len(self.nics),
      constants.ISPEC_SPINDLE_USE: spindle_use,
      }

    group_info = self.cfg.GetNodeGroup(pnode.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
    res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
                                               self.op.disk_template)
    if not self.op.ignore_ipolicy and res:
      msg = ("Instance allocation to group %s (%s) violates policy: %s" %
             (pnode.group, group_info.name, utils.CommaJoin(res)))
      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

    CheckHVParams(self, node_uuids, self.op.hypervisor, self.op.hvparams)

    CheckNodeHasOS(self, pnode.uuid, self.op.os_type, self.op.force_variant)
    # check OS parameters (remotely)
    CheckOSParams(self, True, node_uuids, self.op.os_type, self.os_full)

    CheckNicsBridgesExist(self, self.nics, self.pnode.uuid)

    #TODO: _CheckExtParams (remotely)
    # Check parameters for extstorage

    # memory check on primary node
    #TODO(dynmem): use MINMEM for checking
    if self.op.start:
      hvfull = objects.FillDict(cluster.hvparams.get(self.op.hypervisor, {}),
                                self.op.hvparams)
      CheckNodeFreeMemory(self, self.pnode.uuid,
                          "creating instance %s" % self.op.instance_name,
                          self.be_full[constants.BE_MAXMEM],
                          self.op.hypervisor, hvfull)

    self.dry_run_result = list(node_uuids)

  def Exec(self, feedback_fn):
    """Create and add the instance to the cluster.

    """
    assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
                self.owned_locks(locking.LEVEL_NODE)), \
      "Node locks differ from node resource locks"
    assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)

    ht_kind = self.op.hypervisor
    if ht_kind in constants.HTS_REQ_PORT:
      network_port = self.cfg.AllocatePort()
    else:
      network_port = None

    instance_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())

    # This is ugly but we got a chicken-egg problem here
    # We can only take the group disk parameters, as the instance
    # has no disks yet (we are generating them right here).
    nodegroup = self.cfg.GetNodeGroup(self.pnode.group)
    disks = GenerateDiskTemplate(self,
                                 self.op.disk_template,
                                 instance_uuid, self.pnode.uuid,
                                 self.secondaries,
                                 self.disks,
                                 self.instance_file_storage_dir,
                                 self.op.file_driver,
                                 0,
                                 feedback_fn,
                                 self.cfg.GetGroupDiskParams(nodegroup))

    iobj = objects.Instance(name=self.op.instance_name,
                            uuid=instance_uuid,
                            os=self.op.os_type,
                            primary_node=self.pnode.uuid,
                            nics=self.nics, disks=disks,
                            disk_template=self.op.disk_template,
                            disks_active=False,
                            admin_state=constants.ADMINST_DOWN,
                            network_port=network_port,
                            beparams=self.op.beparams,
                            hvparams=self.op.hvparams,
                            hypervisor=self.op.hypervisor,
                            osparams=self.op.osparams,
                            )

    if self.op.tags:
      for tag in self.op.tags:
        iobj.AddTag(tag)

    if self.adopt_disks:
      if self.op.disk_template == constants.DT_PLAIN:
        # rename LVs to the newly-generated names; we need to construct
        # 'fake' LV disks with the old data, plus the new unique_id
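        # For LVM-backed disks logical_id is a (vg_name, lv_name) pair, so
        # pointing it at the adopted LV lets the rename RPC move the existing
        # volume over to the newly generated name.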
1235
        tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
1236
        rename_to = []
1237
        for t_dsk, a_dsk in zip(tmp_disks, self.disks):
1238
          rename_to.append(t_dsk.logical_id)
1239
          t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
1240
          self.cfg.SetDiskID(t_dsk, self.pnode.uuid)
1241
        result = self.rpc.call_blockdev_rename(self.pnode.uuid,
1242
                                               zip(tmp_disks, rename_to))
1243
        result.Raise("Failed to rename adoped LVs")
1244
    else:
1245
      feedback_fn("* creating instance disks...")
1246
      try:
1247
        CreateDisks(self, iobj)
1248
      except errors.OpExecError:
1249
        self.LogWarning("Device creation failed")
1250
        self.cfg.ReleaseDRBDMinors(self.op.instance_name)
1251
        raise
1252

    
1253
    feedback_fn("adding instance %s to cluster config" % self.op.instance_name)
1254

    
1255
    self.cfg.AddInstance(iobj, self.proc.GetECId())
1256

    
1257
    # Declare that we don't want to remove the instance lock anymore, as we've
1258
    # added the instance to the config
1259
    del self.remove_locks[locking.LEVEL_INSTANCE]
1260

    
1261
    if self.op.mode == constants.INSTANCE_IMPORT:
1262
      # Release unused nodes
1263
      ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node_uuid])
1264
    else:
1265
      # Release all nodes
1266
      ReleaseLocks(self, locking.LEVEL_NODE)
1267

    
1268
    disk_abort = False
1269
    if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
1270
      feedback_fn("* wiping instance disks...")
1271
      try:
1272
        WipeDisks(self, iobj)
1273
      except errors.OpExecError, err:
1274
        logging.exception("Wiping disks failed")
1275
        self.LogWarning("Wiping instance disks failed (%s)", err)
1276
        disk_abort = True
1277

    
1278
    if disk_abort:
1279
      # Something is already wrong with the disks, don't do anything else
1280
      pass
1281
    elif self.op.wait_for_sync:
1282
      disk_abort = not WaitForSync(self, iobj)
1283
    elif iobj.disk_template in constants.DTS_INT_MIRROR:
1284
      # make sure the disks are not degraded (still sync-ing is ok)
1285
      feedback_fn("* checking mirrors status")
1286
      disk_abort = not WaitForSync(self, iobj, oneshot=True)
1287
    else:
1288
      disk_abort = False
1289

    
1290
    if disk_abort:
1291
      RemoveDisks(self, iobj)
1292
      self.cfg.RemoveInstance(iobj.uuid)
1293
      # Make sure the instance lock gets removed
1294
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
1295
      raise errors.OpExecError("There are some degraded disks for"
1296
                               " this instance")
1297

    
1298
    # instance disks are now active
1299
    iobj.disks_active = True
1300

    
1301
    # Release all node resource locks
1302
    ReleaseLocks(self, locking.LEVEL_NODE_RES)
1303

    
1304
    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
1305
      # we need to set the disks ID to the primary node, since the
1306
      # preceding code might or might not have done it, depending on
1307
      # disk template and other options
1308
      for disk in iobj.disks:
1309
        self.cfg.SetDiskID(disk, self.pnode.uuid)
1310
      if self.op.mode == constants.INSTANCE_CREATE:
1311
        if not self.op.no_install:
1312
          pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
1313
                        not self.op.wait_for_sync)
1314
          if pause_sync:
1315
            feedback_fn("* pausing disk sync to install instance OS")
1316
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
1317
                                                              (iobj.disks,
1318
                                                               iobj), True)
1319
            for idx, success in enumerate(result.payload):
1320
              if not success:
1321
                logging.warn("pause-sync of instance %s for disk %d failed",
1322
                             self.op.instance_name, idx)
1323

    
1324
          feedback_fn("* running the instance OS create scripts...")
1325
          # FIXME: pass debug option from opcode to backend
1326
          os_add_result = \
1327
            self.rpc.call_instance_os_add(self.pnode.uuid, (iobj, None), False,
1328
                                          self.op.debug_level)
1329
          if pause_sync:
1330
            feedback_fn("* resuming disk sync")
1331
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
1332
                                                              (iobj.disks,
1333
                                                               iobj), False)
1334
            for idx, success in enumerate(result.payload):
1335
              if not success:
1336
                logging.warn("resume-sync of instance %s for disk %d failed",
1337
                             self.op.instance_name, idx)
1338

    
1339
          os_add_result.Raise("Could not add os for instance %s"
1340
                              " on node %s" % (self.op.instance_name,
1341
                                               self.pnode.name))
1342

    
1343
      else:
1344
        if self.op.mode == constants.INSTANCE_IMPORT:
1345
          feedback_fn("* running the instance OS import scripts...")
1346

    
1347
          transfers = []
1348

    
1349
          for idx, image in enumerate(self.src_images):
1350
            if not image:
1351
              continue
1352

    
1353
            # FIXME: pass debug option from opcode to backend
1354
            dt = masterd.instance.DiskTransfer("disk/%s" % idx,
1355
                                               constants.IEIO_FILE, (image, ),
1356
                                               constants.IEIO_SCRIPT,
1357
                                               (iobj.disks[idx], idx),
1358
                                               None)
1359
            transfers.append(dt)
1360

    
1361
          import_result = \
1362
            masterd.instance.TransferInstanceData(self, feedback_fn,
1363
                                                  self.op.src_node_uuid,
1364
                                                  self.pnode.uuid,
1365
                                                  self.pnode.secondary_ip,
1366
                                                  iobj, transfers)
1367
          if not compat.all(import_result):
1368
            self.LogWarning("Some disks for instance %s on node %s were not"
1369
                            " imported successfully" % (self.op.instance_name,
1370
                                                        self.pnode.name))
1371

    
1372
          rename_from = self._old_instance_name
1373

    
1374
        elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
1375
          feedback_fn("* preparing remote import...")
1376
          # The source cluster will stop the instance before attempting to make
1377
          # a connection. In some cases stopping an instance can take a long
1378
          # time, hence the shutdown timeout is added to the connection
1379
          # timeout.
1380
          connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
1381
                             self.op.source_shutdown_timeout)
1382
          timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
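          # For illustration only (the numbers are hypothetical, not from the
          # source): if RIE_CONNECT_TIMEOUT were 60 seconds and the requested
          # shutdown timeout 120 seconds, the remote end would be allowed up
          # to 180 seconds to establish the import connection.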
1383

    
1384
          assert iobj.primary_node == self.pnode.uuid
1385
          disk_results = \
1386
            masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
1387
                                          self.source_x509_ca,
1388
                                          self._cds, timeouts)
1389
          if not compat.all(disk_results):
1390
            # TODO: Should the instance still be started, even if some disks
1391
            # failed to import (valid for local imports, too)?
1392
            self.LogWarning("Some disks for instance %s on node %s were not"
1393
                            " imported successfully" % (self.op.instance_name,
1394
                                                        self.pnode.name))
1395

    
1396
          rename_from = self.source_instance_name
1397

    
1398
        else:
1399
          # also checked in the prereq part
1400
          raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
1401
                                       % self.op.mode)
1402

    
1403
        # Run rename script on newly imported instance
1404
        assert iobj.name == self.op.instance_name
1405
        feedback_fn("Running rename script for %s" % self.op.instance_name)
1406
        result = self.rpc.call_instance_run_rename(self.pnode.uuid, iobj,
1407
                                                   rename_from,
1408
                                                   self.op.debug_level)
1409
        result.Warn("Failed to run rename script for %s on node %s" %
1410
                    (self.op.instance_name, self.pnode.name), self.LogWarning)
1411

    
1412
    assert not self.owned_locks(locking.LEVEL_NODE_RES)
1413

    
1414
    if self.op.start:
1415
      iobj.admin_state = constants.ADMINST_UP
1416
      self.cfg.Update(iobj, feedback_fn)
1417
      logging.info("Starting instance %s on node %s", self.op.instance_name,
1418
                   self.pnode.name)
1419
      feedback_fn("* starting instance...")
1420
      result = self.rpc.call_instance_start(self.pnode.uuid, (iobj, None, None),
1421
                                            False, self.op.reason)
1422
      result.Raise("Could not start instance")
1423

    
1424
    return list(iobj.all_nodes)
1425

    
1426

    
1427
class LUInstanceRename(LogicalUnit):
1428
  """Rename an instance.
1429

1430
  """
1431
  HPATH = "instance-rename"
1432
  HTYPE = constants.HTYPE_INSTANCE
1433

    
1434
  def CheckArguments(self):
1435
    """Check arguments.
1436

1437
    """
1438
    if self.op.ip_check and not self.op.name_check:
1439
      # TODO: make the ip check more flexible and not depend on the name check
1440
      raise errors.OpPrereqError("IP address check requires a name check",
1441
                                 errors.ECODE_INVAL)
1442

    
1443
  def BuildHooksEnv(self):
1444
    """Build hooks env.
1445

1446
    This runs on master, primary and secondary nodes of the instance.
1447

1448
    """
1449
    env = BuildInstanceHookEnvByObject(self, self.instance)
1450
    env["INSTANCE_NEW_NAME"] = self.op.new_name
1451
    return env
1452

    
1453
  def BuildHooksNodes(self):
1454
    """Build hooks nodes.
1455

1456
    """
1457
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
1458
    return (nl, nl)
1459

    
1460
  def CheckPrereq(self):
1461
    """Check prerequisites.
1462

1463
    This checks that the instance is in the cluster and is not running.
1464

1465
    """
1466
    (self.op.instance_uuid, self.op.instance_name) = \
1467
      ExpandInstanceUuidAndName(self.cfg, self.op.instance_uuid,
1468
                                self.op.instance_name)
1469
    instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1470
    assert instance is not None
1471

    
1472
    # It should actually not happen that an instance is running with a disabled
1473
    # disk template, but in case it does, the renaming of file-based instances
1474
    # will fail horribly. Thus, we test it before.
1475
    if (instance.disk_template in constants.DTS_FILEBASED and
1476
        self.op.new_name != instance.name):
1477
      CheckDiskTemplateEnabled(self.cfg.GetClusterInfo(),
1478
                               instance.disk_template)
1479

    
1480
    CheckNodeOnline(self, instance.primary_node)
1481
    CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
1482
                       msg="cannot rename")
1483
    self.instance = instance
1484

    
1485
    new_name = self.op.new_name
1486
    if self.op.name_check:
1487
      hostname = _CheckHostnameSane(self, new_name)
1488
      new_name = self.op.new_name = hostname.name
1489
      if (self.op.ip_check and
1490
          netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
1491
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
1492
                                   (hostname.ip, new_name),
1493
                                   errors.ECODE_NOTUNIQUE)
1494

    
1495
    instance_names = [inst.name for
1496
                      inst in self.cfg.GetAllInstancesInfo().values()]
1497
    if new_name in instance_names and new_name != instance.name:
1498
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
1499
                                 new_name, errors.ECODE_EXISTS)
1500

    
1501
  def Exec(self, feedback_fn):
1502
    """Rename the instance.
1503

1504
    """
1505
    old_name = self.instance.name
1506

    
1507
    rename_file_storage = False
1508
    if (self.instance.disk_template in constants.DTS_FILEBASED and
1509
        self.op.new_name != self.instance.name):
1510
      old_file_storage_dir = os.path.dirname(
1511
                               self.instance.disks[0].logical_id[1])
1512
      rename_file_storage = True
1513

    
1514
    self.cfg.RenameInstance(self.instance.uuid, self.op.new_name)
1515
    # Change the instance lock. This is definitely safe while we hold the BGL.
1516
    # Otherwise the new lock would have to be added in acquired mode.
1517
    assert self.REQ_BGL
1518
    assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER)
1519
    self.glm.remove(locking.LEVEL_INSTANCE, old_name)
1520
    self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
1521

    
1522
    # re-read the instance from the configuration after rename
1523
    renamed_inst = self.cfg.GetInstanceInfo(self.instance.uuid)
1524

    
1525
    if rename_file_storage:
1526
      new_file_storage_dir = os.path.dirname(
1527
                               renamed_inst.disks[0].logical_id[1])
1528
      result = self.rpc.call_file_storage_dir_rename(renamed_inst.primary_node,
1529
                                                     old_file_storage_dir,
1530
                                                     new_file_storage_dir)
1531
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
1532
                   " (but the instance has been renamed in Ganeti)" %
1533
                   (self.cfg.GetNodeName(renamed_inst.primary_node),
1534
                    old_file_storage_dir, new_file_storage_dir))
1535

    
1536
    StartInstanceDisks(self, renamed_inst, None)
1537
    # update info on disks
1538
    info = GetInstanceInfoText(renamed_inst)
1539
    for (idx, disk) in enumerate(renamed_inst.disks):
1540
      for node_uuid in renamed_inst.all_nodes:
1541
        self.cfg.SetDiskID(disk, node_uuid)
1542
        result = self.rpc.call_blockdev_setinfo(node_uuid, disk, info)
1543
        result.Warn("Error setting info on node %s for disk %s" %
1544
                    (self.cfg.GetNodeName(node_uuid), idx), self.LogWarning)
1545
    try:
1546
      result = self.rpc.call_instance_run_rename(renamed_inst.primary_node,
1547
                                                 renamed_inst, old_name,
1548
                                                 self.op.debug_level)
1549
      result.Warn("Could not run OS rename script for instance %s on node %s"
1550
                  " (but the instance has been renamed in Ganeti)" %
1551
                  (renamed_inst.name,
1552
                   self.cfg.GetNodeName(renamed_inst.primary_node)),
1553
                  self.LogWarning)
1554
    finally:
1555
      ShutdownInstanceDisks(self, renamed_inst)
1556

    
1557
    return renamed_inst.name
1558

    
1559

    
1560
class LUInstanceRemove(LogicalUnit):
1561
  """Remove an instance.
1562

1563
  """
1564
  HPATH = "instance-remove"
1565
  HTYPE = constants.HTYPE_INSTANCE
1566
  REQ_BGL = False
1567

    
1568
  def ExpandNames(self):
1569
    self._ExpandAndLockInstance()
1570
    self.needed_locks[locking.LEVEL_NODE] = []
1571
    self.needed_locks[locking.LEVEL_NODE_RES] = []
1572
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
1573

    
1574
  def DeclareLocks(self, level):
1575
    if level == locking.LEVEL_NODE:
1576
      self._LockInstancesNodes()
1577
    elif level == locking.LEVEL_NODE_RES:
1578
      # Copy node locks
1579
      self.needed_locks[locking.LEVEL_NODE_RES] = \
1580
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1581

    
1582
  def BuildHooksEnv(self):
1583
    """Build hooks env.
1584

1585
    This runs on master, primary and secondary nodes of the instance.
1586

1587
    """
1588
    env = BuildInstanceHookEnvByObject(self, self.instance)
1589
    env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
1590
    return env
1591

    
1592
  def BuildHooksNodes(self):
1593
    """Build hooks nodes.
1594

1595
    """
1596
    nl = [self.cfg.GetMasterNode()]
1597
    nl_post = list(self.instance.all_nodes) + nl
1598
    return (nl, nl_post)
1599

    
1600
  def CheckPrereq(self):
1601
    """Check prerequisites.
1602

1603
    This checks that the instance is in the cluster.
1604

1605
    """
1606
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1607
    assert self.instance is not None, \
1608
      "Cannot retrieve locked instance %s" % self.op.instance_name
1609

    
1610
  def Exec(self, feedback_fn):
1611
    """Remove the instance.
1612

1613
    """
1614
    logging.info("Shutting down instance %s on node %s", self.instance.name,
1615
                 self.cfg.GetNodeName(self.instance.primary_node))
1616

    
1617
    result = self.rpc.call_instance_shutdown(self.instance.primary_node,
1618
                                             self.instance,
1619
                                             self.op.shutdown_timeout,
1620
                                             self.op.reason)
1621
    if self.op.ignore_failures:
1622
      result.Warn("Warning: can't shutdown instance", feedback_fn)
1623
    else:
1624
      result.Raise("Could not shutdown instance %s on node %s" %
1625
                   (self.instance.name,
1626
                    self.cfg.GetNodeName(self.instance.primary_node)))
1627

    
1628
    assert (self.owned_locks(locking.LEVEL_NODE) ==
1629
            self.owned_locks(locking.LEVEL_NODE_RES))
1630
    assert not (set(self.instance.all_nodes) -
1631
                self.owned_locks(locking.LEVEL_NODE)), \
1632
      "Not owning correct locks"
1633

    
1634
    RemoveInstance(self, feedback_fn, self.instance, self.op.ignore_failures)
1635

    
1636

    
1637
class LUInstanceMove(LogicalUnit):
1638
  """Move an instance by data-copying.
1639

1640
  """
1641
  HPATH = "instance-move"
1642
  HTYPE = constants.HTYPE_INSTANCE
1643
  REQ_BGL = False
1644

    
1645
  def ExpandNames(self):
1646
    self._ExpandAndLockInstance()
1647
    (self.op.target_node_uuid, self.op.target_node) = \
1648
      ExpandNodeUuidAndName(self.cfg, self.op.target_node_uuid,
1649
                            self.op.target_node)
1650
    self.needed_locks[locking.LEVEL_NODE] = [self.op.target_node_uuid]
1651
    self.needed_locks[locking.LEVEL_NODE_RES] = []
1652
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
1653

    
1654
  def DeclareLocks(self, level):
1655
    if level == locking.LEVEL_NODE:
1656
      self._LockInstancesNodes(primary_only=True)
1657
    elif level == locking.LEVEL_NODE_RES:
1658
      # Copy node locks
1659
      self.needed_locks[locking.LEVEL_NODE_RES] = \
1660
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1661

    
1662
  def BuildHooksEnv(self):
1663
    """Build hooks env.
1664

1665
    This runs on master, primary and secondary nodes of the instance.
1666

1667
    """
1668
    env = {
1669
      "TARGET_NODE": self.op.target_node,
1670
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
1671
      }
1672
    env.update(BuildInstanceHookEnvByObject(self, self.instance))
1673
    return env
1674

    
1675
  def BuildHooksNodes(self):
1676
    """Build hooks nodes.
1677

1678
    """
1679
    nl = [
1680
      self.cfg.GetMasterNode(),
1681
      self.instance.primary_node,
1682
      self.op.target_node_uuid,
1683
      ]
1684
    return (nl, nl)
1685

    
1686
  def CheckPrereq(self):
1687
    """Check prerequisites.
1688

1689
    This checks that the instance is in the cluster.
1690

1691
    """
1692
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1693
    assert self.instance is not None, \
1694
      "Cannot retrieve locked instance %s" % self.op.instance_name
1695

    
1696
    if self.instance.disk_template not in constants.DTS_COPYABLE:
1697
      raise errors.OpPrereqError("Disk template %s not suitable for copying" %
1698
                                 self.instance.disk_template,
1699
                                 errors.ECODE_STATE)
1700

    
1701
    target_node = self.cfg.GetNodeInfo(self.op.target_node_uuid)
1702
    assert target_node is not None, \
1703
      "Cannot retrieve locked node %s" % self.op.target_node
1704

    
1705
    self.target_node_uuid = target_node.uuid
1706
    if target_node.uuid == self.instance.primary_node:
1707
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
1708
                                 (self.instance.name, target_node.name),
1709
                                 errors.ECODE_STATE)
1710

    
1711
    bep = self.cfg.GetClusterInfo().FillBE(self.instance)
1712

    
1713
    for idx, dsk in enumerate(self.instance.disks):
1714
      if dsk.dev_type not in (constants.DT_PLAIN, constants.DT_FILE,
1715
                              constants.DT_SHARED_FILE):
1716
        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
1717
                                   " cannot copy" % idx, errors.ECODE_STATE)
1718

    
1719
    CheckNodeOnline(self, target_node.uuid)
1720
    CheckNodeNotDrained(self, target_node.uuid)
1721
    CheckNodeVmCapable(self, target_node.uuid)
1722
    cluster = self.cfg.GetClusterInfo()
1723
    group_info = self.cfg.GetNodeGroup(target_node.group)
1724
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
1725
    CheckTargetNodeIPolicy(self, ipolicy, self.instance, target_node, self.cfg,
1726
                           ignore=self.op.ignore_ipolicy)
1727

    
1728
    if self.instance.admin_state == constants.ADMINST_UP:
1729
      # check memory requirements on the secondary node
1730
      CheckNodeFreeMemory(
1731
          self, target_node.uuid, "failing over instance %s" %
1732
          self.instance.name, bep[constants.BE_MAXMEM],
1733
          self.instance.hypervisor,
1734
          self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])
1735
    else:
1736
      self.LogInfo("Not checking memory on the secondary node as"
1737
                   " instance will not be started")
1738

    
1739
    # check bridge existance
1740
    CheckInstanceBridgesExist(self, self.instance, node_uuid=target_node.uuid)
1741

    
1742
  def Exec(self, feedback_fn):
1743
    """Move an instance.
1744

1745
    The move is done by shutting it down on its present node, copying
1746
    the data over (slow) and starting it on the new node.
1747

1748
    """
1749
    source_node = self.cfg.GetNodeInfo(self.instance.primary_node)
1750
    target_node = self.cfg.GetNodeInfo(self.target_node_uuid)
1751

    
1752
    self.LogInfo("Shutting down instance %s on source node %s",
1753
                 self.instance.name, source_node.name)
1754

    
1755
    assert (self.owned_locks(locking.LEVEL_NODE) ==
1756
            self.owned_locks(locking.LEVEL_NODE_RES))
1757

    
1758
    result = self.rpc.call_instance_shutdown(source_node.uuid, self.instance,
1759
                                             self.op.shutdown_timeout,
1760
                                             self.op.reason)
1761
    if self.op.ignore_consistency:
1762
      result.Warn("Could not shutdown instance %s on node %s. Proceeding"
1763
                  " anyway. Please make sure node %s is down. Error details" %
1764
                  (self.instance.name, source_node.name, source_node.name),
1765
                  self.LogWarning)
1766
    else:
1767
      result.Raise("Could not shutdown instance %s on node %s" %
1768
                   (self.instance.name, source_node.name))
1769

    
1770
    # create the target disks
1771
    try:
1772
      CreateDisks(self, self.instance, target_node_uuid=target_node.uuid)
1773
    except errors.OpExecError:
1774
      self.LogWarning("Device creation failed")
1775
      self.cfg.ReleaseDRBDMinors(self.instance.uuid)
1776
      raise
1777

    
1778
    cluster_name = self.cfg.GetClusterInfo().cluster_name
1779

    
1780
    errs = []
1781
    # activate, get path, copy the data over
1782
    for idx, disk in enumerate(self.instance.disks):
1783
      self.LogInfo("Copying data for disk %d", idx)
1784
      result = self.rpc.call_blockdev_assemble(
1785
                 target_node.uuid, (disk, self.instance), self.instance.name,
1786
                 True, idx)
1787
      if result.fail_msg:
1788
        self.LogWarning("Can't assemble newly created disk %d: %s",
1789
                        idx, result.fail_msg)
1790
        errs.append(result.fail_msg)
1791
        break
1792
      dev_path = result.payload
1793
      result = self.rpc.call_blockdev_export(source_node.uuid, (disk,
1794
                                                                self.instance),
1795
                                             target_node.name, dev_path,
1796
                                             cluster_name)
1797
      if result.fail_msg:
1798
        self.LogWarning("Can't copy data over for disk %d: %s",
1799
                        idx, result.fail_msg)
1800
        errs.append(result.fail_msg)
1801
        break
1802

    
1803
    if errs:
1804
      self.LogWarning("Some disks failed to copy, aborting")
1805
      try:
1806
        RemoveDisks(self, self.instance, target_node_uuid=target_node.uuid)
1807
      finally:
1808
        self.cfg.ReleaseDRBDMinors(self.instance.uuid)
1809
        raise errors.OpExecError("Errors during disk copy: %s" %
1810
                                 (",".join(errs),))
1811

    
1812
    self.instance.primary_node = target_node.uuid
1813
    self.cfg.Update(self.instance, feedback_fn)
1814

    
1815
    self.LogInfo("Removing the disks on the original node")
1816
    RemoveDisks(self, self.instance, target_node_uuid=source_node.uuid)
1817

    
1818
    # Only start the instance if it's marked as up
1819
    if self.instance.admin_state == constants.ADMINST_UP:
1820
      self.LogInfo("Starting instance %s on node %s",
1821
                   self.instance.name, target_node.name)
1822

    
1823
      disks_ok, _ = AssembleInstanceDisks(self, self.instance,
1824
                                          ignore_secondaries=True)
1825
      if not disks_ok:
1826
        ShutdownInstanceDisks(self, self.instance)
1827
        raise errors.OpExecError("Can't activate the instance's disks")
1828

    
1829
      result = self.rpc.call_instance_start(target_node.uuid,
1830
                                            (self.instance, None, None), False,
1831
                                            self.op.reason)
1832
      msg = result.fail_msg
1833
      if msg:
1834
        ShutdownInstanceDisks(self, self.instance)
1835
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
1836
                                 (self.instance.name, target_node.name, msg))
1837

    
1838

    
1839
class LUInstanceMultiAlloc(NoHooksLU):
1840
  """Allocates multiple instances at the same time.
1841

1842
  """
1843
  REQ_BGL = False
1844

    
1845
  def CheckArguments(self):
1846
    """Check arguments.
1847

1848
    """
1849
    nodes = []
1850
    for inst in self.op.instances:
1851
      if inst.iallocator is not None:
1852
        raise errors.OpPrereqError("iallocator are not allowed to be set on"
1853
                                   " instance objects", errors.ECODE_INVAL)
1854
      nodes.append(bool(inst.pnode))
1855
      if inst.disk_template in constants.DTS_INT_MIRROR:
1856
        nodes.append(bool(inst.snode))
1857

    
1858
    has_nodes = compat.any(nodes)
1859
    if compat.all(nodes) ^ has_nodes:
1860
      raise errors.OpPrereqError("There are instance objects providing"
1861
                                 " pnode/snode while others do not",
1862
                                 errors.ECODE_INVAL)
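    # Illustrative example (hypothetical input, not from the source): with
    # three instances where only the first one specifies a primary node,
    #   nodes = [True, False, False]
    #   has_nodes = compat.any(nodes)   -> True
    #   compat.all(nodes) ^ has_nodes   -> True
    # so such a mixed specification is rejected by the check above.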
1863

    
1864
    if not has_nodes and self.op.iallocator is None:
1865
      default_iallocator = self.cfg.GetDefaultIAllocator()
1866
      if default_iallocator:
1867
        self.op.iallocator = default_iallocator
1868
      else:
1869
        raise errors.OpPrereqError("No iallocator or nodes on the instances"
1870
                                   " given and no cluster-wide default"
1871
                                   " iallocator found; please specify either"
1872
                                   " an iallocator or nodes on the instances"
1873
                                   " or set a cluster-wide default iallocator",
1874
                                   errors.ECODE_INVAL)
1875

    
1876
    _CheckOpportunisticLocking(self.op)
1877

    
1878
    dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
1879
    if dups:
1880
      raise errors.OpPrereqError("There are duplicate instance names: %s" %
1881
                                 utils.CommaJoin(dups), errors.ECODE_INVAL)
1882

    
1883
  def ExpandNames(self):
1884
    """Calculate the locks.
1885

1886
    """
1887
    self.share_locks = ShareAll()
1888
    self.needed_locks = {
1889
      # iallocator will select nodes and even if no iallocator is used,
1890
      # collisions with LUInstanceCreate should be avoided
1891
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
1892
      }
1893

    
1894
    if self.op.iallocator:
1895
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
1896
      self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET
1897

    
1898
      if self.op.opportunistic_locking:
1899
        self.opportunistic_locks[locking.LEVEL_NODE] = True
1900
        self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
1901
    else:
1902
      nodeslist = []
1903
      for inst in self.op.instances:
1904
        (inst.pnode_uuid, inst.pnode) = \
1905
          ExpandNodeUuidAndName(self.cfg, inst.pnode_uuid, inst.pnode)
1906
        nodeslist.append(inst.pnode_uuid)
1907
        if inst.snode is not None:
1908
          (inst.snode_uuid, inst.snode) = \
1909
            ExpandNodeUuidAndName(self.cfg, inst.snode_uuid, inst.snode)
1910
          nodeslist.append(inst.snode_uuid)
1911

    
1912
      self.needed_locks[locking.LEVEL_NODE] = nodeslist
1913
      # Lock resources of instance's primary and secondary nodes (copy to
1914
      # prevent accidential modification)
1915
      self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)
1916

    
1917
  def CheckPrereq(self):
1918
    """Check prerequisite.
1919

1920
    """
1921
    if self.op.iallocator:
1922
      cluster = self.cfg.GetClusterInfo()
1923
      default_vg = self.cfg.GetVGName()
1924
      ec_id = self.proc.GetECId()
1925

    
1926
      if self.op.opportunistic_locking:
1927
        # Only consider nodes for which a lock is held
1928
        node_whitelist = self.cfg.GetNodeNames(
1929
                           list(self.owned_locks(locking.LEVEL_NODE)))
1930
      else:
1931
        node_whitelist = None
1932

    
1933
      insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
1934
                                           _ComputeNics(op, cluster, None,
1935
                                                        self.cfg, ec_id),
1936
                                           _ComputeFullBeParams(op, cluster),
1937
                                           node_whitelist)
1938
               for op in self.op.instances]
1939

    
1940
      req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
1941
      ial = iallocator.IAllocator(self.cfg, self.rpc, req)
1942

    
1943
      ial.Run(self.op.iallocator)
1944

    
1945
      if not ial.success:
1946
        raise errors.OpPrereqError("Can't compute nodes using"
1947
                                   " iallocator '%s': %s" %
1948
                                   (self.op.iallocator, ial.info),
1949
                                   errors.ECODE_NORES)
1950

    
1951
      self.ia_result = ial.result
1952

    
1953
    if self.op.dry_run:
1954
      self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
1955
        constants.JOB_IDS_KEY: [],
1956
        })
1957

    
1958
  def _ConstructPartialResult(self):
1959
    """Contructs the partial result.
1960

1961
    """
1962
    if self.op.iallocator:
1963
      (allocatable, failed_insts) = self.ia_result
1964
      allocatable_insts = map(compat.fst, allocatable)
1965
    else:
1966
      allocatable_insts = [op.instance_name for op in self.op.instances]
1967
      failed_insts = []
1968

    
1969
    return {
1970
      opcodes.OpInstanceMultiAlloc.ALLOCATABLE_KEY: allocatable_insts,
1971
      opcodes.OpInstanceMultiAlloc.FAILED_KEY: failed_insts,
1972
      }
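    # For illustration (hypothetical instance names, not from the source),
    # the returned dictionary has the following shape:
    #   {
    #     opcodes.OpInstanceMultiAlloc.ALLOCATABLE_KEY: ["inst1", "inst2"],
    #     opcodes.OpInstanceMultiAlloc.FAILED_KEY: ["inst3"],
    #   }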
1973

    
1974
  def Exec(self, feedback_fn):
1975
    """Executes the opcode.
1976

1977
    """
1978
    jobs = []
1979
    if self.op.iallocator:
1980
      op2inst = dict((op.instance_name, op) for op in self.op.instances)
1981
      (allocatable, failed) = self.ia_result
1982

    
1983
      for (name, node_names) in allocatable:
1984
        op = op2inst.pop(name)
1985

    
1986
        (op.pnode_uuid, op.pnode) = \
1987
          ExpandNodeUuidAndName(self.cfg, None, node_names[0])
1988
        if len(node_names) > 1:
1989
          (op.snode_uuid, op.snode) = \
1990
            ExpandNodeUuidAndName(self.cfg, None, node_names[1])
1991

    
1992
        jobs.append([op])
1993

    
1994
      missing = set(op2inst.keys()) - set(failed)
      assert not missing, \
        "Iallocator returned an incomplete result: %s" % \
        utils.CommaJoin(missing)
1998
    else:
1999
      jobs.extend([op] for op in self.op.instances)
2000

    
2001
    return ResultWithJobs(jobs, **self._ConstructPartialResult())
2002

    
2003

    
2004
class _InstNicModPrivate:
  """Data structure for network interface modifications.

  Used by L{LUInstanceSetParams}.

  """
  def __init__(self):
    self.params = None
    self.filled = None

    
2014

    
2015
def _PrepareContainerMods(mods, private_fn):
  """Prepares a list of container modifications by adding a private data field.

  @type mods: list of tuples; (operation, index, parameters)
  @param mods: List of modifications
  @type private_fn: callable or None
  @param private_fn: Callable for constructing a private data field for a
    modification
  @rtype: list

  """
  if private_fn is None:
    fn = lambda: None
  else:
    fn = private_fn

  return [(op, idx, params, fn()) for (op, idx, params) in mods]
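
# Example (illustrative sketch; the parameter values are hypothetical): a
# single "add a NIC at the end" modification is turned into the internal
# four-tuple form, with a fresh private data object attached:
#
#   mods = _PrepareContainerMods([(constants.DDM_ADD, -1, {"mac": "auto"})],
#                                _InstNicModPrivate)
#   # -> [(constants.DDM_ADD, -1, {"mac": "auto"}, <_InstNicModPrivate>)]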
2032

    
2033

    
2034
def _CheckNodesPhysicalCPUs(lu, node_uuids, requested, hypervisor_specs):
2035
  """Checks if nodes have enough physical CPUs
2036

2037
  This function checks if all given nodes have the needed number of
2038
  physical CPUs. In case any node has less CPUs or we cannot get the
2039
  information from the node, this function raises an OpPrereqError
2040
  exception.
2041

2042
  @type lu: C{LogicalUnit}
2043
  @param lu: a logical unit from which we get configuration data
2044
  @type node_uuids: C{list}
2045
  @param node_uuids: the list of node UUIDs to check
2046
  @type requested: C{int}
2047
  @param requested: the minimum acceptable number of physical CPUs
2048
  @type hypervisor_specs: list of pairs (string, dict of strings)
2049
  @param hypervisor_specs: list of hypervisor specifications in
2050
      pairs (hypervisor_name, hvparams)
2051
  @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
2052
      or we cannot check the node
2053

2054
  """
2055
  nodeinfo = lu.rpc.call_node_info(node_uuids, None, hypervisor_specs)
2056
  for node_uuid in node_uuids:
2057
    info = nodeinfo[node_uuid]
2058
    node_name = lu.cfg.GetNodeName(node_uuid)
2059
    info.Raise("Cannot get current information from node %s" % node_name,
2060
               prereq=True, ecode=errors.ECODE_ENVIRON)
2061
    (_, _, (hv_info, )) = info.payload
2062
    num_cpus = hv_info.get("cpu_total", None)
2063
    if not isinstance(num_cpus, int):
2064
      raise errors.OpPrereqError("Can't compute the number of physical CPUs"
2065
                                 " on node %s, result was '%s'" %
2066
                                 (node_name, num_cpus), errors.ECODE_ENVIRON)
2067
    if requested > num_cpus:
2068
      raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
2069
                                 "required" % (node_name, num_cpus, requested),
2070
                                 errors.ECODE_NORES)
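
# Example call (illustrative sketch; the node UUIDs and hypervisor parameter
# source are hypothetical): check that two candidate nodes have at least 4
# physical CPUs for a KVM instance before pinning its VCPUs:
#
#   hv_specs = [(constants.HT_KVM, cluster.hvparams[constants.HT_KVM])]
#   _CheckNodesPhysicalCPUs(self, [node1_uuid, node2_uuid], 4, hv_specs)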
2071

    
2072

    
2073
def GetItemFromContainer(identifier, kind, container):
  """Return the item referred to by the identifier.

  @type identifier: string
  @param identifier: Item index or name or UUID
  @type kind: string
  @param kind: One-word item description
  @type container: list
  @param container: Container to get the item from

  """
  # Index
  try:
    idx = int(identifier)
    if idx == -1:
      # Append
      absidx = len(container) - 1
    elif idx < 0:
      raise IndexError("Not accepting negative indices other than -1")
    elif idx > len(container):
      raise IndexError("Got %s index %s, but there are only %s" %
                       (kind, idx, len(container)))
    else:
      absidx = idx
    return (absidx, container[idx])
  except ValueError:
    pass

  for idx, item in enumerate(container):
    if item.uuid == identifier or item.name == identifier:
      return (idx, item)

  raise errors.OpPrereqError("Cannot find %s with identifier %s" %
                             (kind, identifier), errors.ECODE_NOENT)
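
# Usage examples (illustrative, with a hypothetical list of NIC objects
# `nics`):
#
#   GetItemFromContainer("0", "nic", nics)     # (0, nics[0]), by index
#   GetItemFromContainer("-1", "nic", nics)    # last item ("append" position)
#   GetItemFromContainer("eth0", "nic", nics)  # matched by item name or UUID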
2107

    
2108

    
2109
def _ApplyContainerMods(kind, container, chgdesc, mods,
2110
                        create_fn, modify_fn, remove_fn):
2111
  """Applies descriptions in C{mods} to C{container}.
2112

2113
  @type kind: string
2114
  @param kind: One-word item description
2115
  @type container: list
2116
  @param container: Container to modify
2117
  @type chgdesc: None or list
2118
  @param chgdesc: List of applied changes
2119
  @type mods: list
2120
  @param mods: Modifications as returned by L{_PrepareContainerMods}
2121
  @type create_fn: callable
2122
  @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
2123
    receives absolute item index, parameters and private data object as added
2124
    by L{_PrepareContainerMods}, returns tuple containing new item and changes
2125
    as list
2126
  @type modify_fn: callable
2127
  @param modify_fn: Callback for modifying an existing item
2128
    (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
2129
    and private data object as added by L{_PrepareContainerMods}, returns
2130
    changes as list
2131
  @type remove_fn: callable
2132
  @param remove_fn: Callback on removing item; receives absolute item index,
2133
    item and private data object as added by L{_PrepareContainerMods}
2134

2135
  """
2136
  for (op, identifier, params, private) in mods:
2137
    changes = None
2138

    
2139
    if op == constants.DDM_ADD:
2140
      # Calculate where item will be added
2141
      # When adding an item, identifier can only be an index
2142
      try:
2143
        idx = int(identifier)
2144
      except ValueError:
2145
        raise errors.OpPrereqError("Only possitive integer or -1 is accepted as"
2146
                                   " identifier for %s" % constants.DDM_ADD,
2147
                                   errors.ECODE_INVAL)
2148
      if idx == -1:
2149
        addidx = len(container)
2150
      else:
2151
        if idx < 0:
2152
          raise IndexError("Not accepting negative indices other than -1")
2153
        elif idx > len(container):
2154
          raise IndexError("Got %s index %s, but there are only %s" %
2155
                           (kind, idx, len(container)))
2156
        addidx = idx
2157

    
2158
      if create_fn is None:
2159
        item = params
2160
      else:
2161
        (item, changes) = create_fn(addidx, params, private)
2162

    
2163
      if idx == -1:
2164
        container.append(item)
2165
      else:
2166
        assert idx >= 0
2167
        assert idx <= len(container)
2168
        # list.insert does so before the specified index
2169
        container.insert(idx, item)
2170
    else:
2171
      # Retrieve existing item
2172
      (absidx, item) = GetItemFromContainer(identifier, kind, container)
2173

    
2174
      if op == constants.DDM_REMOVE:
2175
        assert not params
2176

    
2177
        if remove_fn is not None:
2178
          remove_fn(absidx, item, private)
2179

    
2180
        changes = [("%s/%s" % (kind, absidx), "remove")]
2181

    
2182
        assert container[absidx] == item
2183
        del container[absidx]
2184
      elif op == constants.DDM_MODIFY:
2185
        if modify_fn is not None:
2186
          changes = modify_fn(absidx, item, params, private)
2187
      else:
2188
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2189

    
2190
    assert _TApplyContModsCbChanges(changes)
2191

    
2192
    if not (chgdesc is None or changes is None):
2193
      chgdesc.extend(changes)
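
# Example (illustrative sketch; `disks` is a hypothetical list of disk
# objects): removing the second disk while recording the change:
#
#   chgdesc = []
#   mods = _PrepareContainerMods([(constants.DDM_REMOVE, "1", {})], None)
#   _ApplyContainerMods("disk", disks, chgdesc, mods, None, None, None)
#   # chgdesc == [("disk/1", "remove")] and disks[1] has been deleted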
2194

    
2195

    
2196
def _UpdateIvNames(base_index, disks):
  """Updates the C{iv_name} attribute of disks.

  @type disks: list of L{objects.Disk}

  """
  for (idx, disk) in enumerate(disks):
    disk.iv_name = "disk/%s" % (base_index + idx, )
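
# Example (illustrative; `new_disks` is hypothetical): when two disks are
# appended to an instance that already has one disk, only the new objects
# need renumbering:
#
#   _UpdateIvNames(1, new_disks)   # new disks get iv_name "disk/1", "disk/2"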
2204

    
2205

    
2206
class LUInstanceSetParams(LogicalUnit):
2207
  """Modifies an instances's parameters.
2208

2209
  """
2210
  HPATH = "instance-modify"
2211
  HTYPE = constants.HTYPE_INSTANCE
2212
  REQ_BGL = False
2213

    
2214
  @staticmethod
2215
  def _UpgradeDiskNicMods(kind, mods, verify_fn):
2216
    assert ht.TList(mods)
2217
    assert not mods or len(mods[0]) in (2, 3)
2218

    
2219
    if mods and len(mods[0]) == 2:
2220
      result = []
2221

    
2222
      addremove = 0
2223
      for op, params in mods:
2224
        if op in (constants.DDM_ADD, constants.DDM_REMOVE):
2225
          result.append((op, -1, params))
2226
          addremove += 1
2227

    
2228
          if addremove > 1:
2229
            raise errors.OpPrereqError("Only one %s add or remove operation is"
2230
                                       " supported at a time" % kind,
2231
                                       errors.ECODE_INVAL)
2232
        else:
2233
          result.append((constants.DDM_MODIFY, op, params))
2234

    
2235
      assert verify_fn(result)
2236
    else:
2237
      result = mods
2238

    
2239
    return result
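
    # Illustrative example (hypothetical parameter values): the legacy
    # two-tuple syntax is upgraded to the three-tuple form, e.g.
    #   [("add", {"size": 1024})] -> [(constants.DDM_ADD, -1, {"size": 1024})]
    #   [("0", {"mode": "ro"})]   -> [(constants.DDM_MODIFY, "0",
    #                                  {"mode": "ro"})]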
2240

    
2241
  @staticmethod
2242
  def _CheckMods(kind, mods, key_types, item_fn):
2243
    """Ensures requested disk/NIC modifications are valid.
2244

2245
    """
2246
    for (op, _, params) in mods:
2247
      assert ht.TDict(params)
2248

    
2249
      # If 'key_types' is an empty dict, we assume we have an
2250
      # 'ext' template and thus do not ForceDictType
2251
      if key_types:
2252
        utils.ForceDictType(params, key_types)
2253

    
2254
      if op == constants.DDM_REMOVE:
2255
        if params:
2256
          raise errors.OpPrereqError("No settings should be passed when"
2257
                                     " removing a %s" % kind,
2258
                                     errors.ECODE_INVAL)
2259
      elif op in (constants.DDM_ADD, constants.DDM_MODIFY):
2260
        item_fn(op, params)
2261
      else:
2262
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2263

    
2264
  @staticmethod
2265
  def _VerifyDiskModification(op, params, excl_stor):
2266
    """Verifies a disk modification.
2267

2268
    """
2269
    if op == constants.DDM_ADD:
2270
      mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
2271
      if mode not in constants.DISK_ACCESS_SET:
2272
        raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
2273
                                   errors.ECODE_INVAL)
2274

    
2275
      size = params.get(constants.IDISK_SIZE, None)
2276
      if size is None:
2277
        raise errors.OpPrereqError("Required disk parameter '%s' missing" %
2278
                                   constants.IDISK_SIZE, errors.ECODE_INVAL)
2279

    
2280
      try:
2281
        size = int(size)
2282
      except (TypeError, ValueError), err:
2283
        raise errors.OpPrereqError("Invalid disk size parameter: %s" % err,
2284
                                   errors.ECODE_INVAL)
2285

    
2286
      params[constants.IDISK_SIZE] = size
2287
      name = params.get(constants.IDISK_NAME, None)
2288
      if name is not None and name.lower() == constants.VALUE_NONE:
2289
        params[constants.IDISK_NAME] = None
2290

    
2291
      CheckSpindlesExclusiveStorage(params, excl_stor, True)
2292

    
2293
    elif op == constants.DDM_MODIFY:
2294
      if constants.IDISK_SIZE in params:
2295
        raise errors.OpPrereqError("Disk size change not possible, use"
2296
                                   " grow-disk", errors.ECODE_INVAL)
2297
      if len(params) > 2:
2298
        raise errors.OpPrereqError("Disk modification doesn't support"
2299
                                   " additional arbitrary parameters",
2300
                                   errors.ECODE_INVAL)
2301
      name = params.get(constants.IDISK_NAME, None)
2302
      if name is not None and name.lower() == constants.VALUE_NONE:
2303
        params[constants.IDISK_NAME] = None
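
    # Illustrative parameter sets (hypothetical values) that would pass this
    # check on a node without exclusive storage:
    #   add:    {constants.IDISK_SIZE: 1024, constants.IDISK_MODE: "rw"}
    #   modify: {constants.IDISK_MODE: "ro", constants.IDISK_NAME: "data"}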
2304

    
2305
  @staticmethod
2306
  def _VerifyNicModification(op, params):
2307
    """Verifies a network interface modification.
2308

2309
    """
2310
    if op in (constants.DDM_ADD, constants.DDM_MODIFY):
2311
      ip = params.get(constants.INIC_IP, None)
2312
      name = params.get(constants.INIC_NAME, None)
2313
      req_net = params.get(constants.INIC_NETWORK, None)
2314
      link = params.get(constants.NIC_LINK, None)
2315
      mode = params.get(constants.NIC_MODE, None)
2316
      if name is not None and name.lower() == constants.VALUE_NONE:
2317
        params[constants.INIC_NAME] = None
2318
      if req_net is not None:
2319
        if req_net.lower() == constants.VALUE_NONE:
2320
          params[constants.INIC_NETWORK] = None
2321
          req_net = None
2322
        elif link is not None or mode is not None:
2323
          raise errors.OpPrereqError("If network is given"
2324
                                     " mode or link should not",
2325
                                     errors.ECODE_INVAL)
2326

    
2327
      if op == constants.DDM_ADD:
2328
        macaddr = params.get(constants.INIC_MAC, None)
2329
        if macaddr is None:
2330
          params[constants.INIC_MAC] = constants.VALUE_AUTO
2331

    
2332
      if ip is not None:
2333
        if ip.lower() == constants.VALUE_NONE:
2334
          params[constants.INIC_IP] = None
2335
        else:
2336
          if ip.lower() == constants.NIC_IP_POOL:
2337
            if op == constants.DDM_ADD and req_net is None:
2338
              raise errors.OpPrereqError("If ip=pool, parameter network"
2339
                                         " cannot be none",
2340
                                         errors.ECODE_INVAL)
2341
          else:
2342
            if not netutils.IPAddress.IsValid(ip):
2343
              raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
2344
                                         errors.ECODE_INVAL)
2345

    
2346
      if constants.INIC_MAC in params:
2347
        macaddr = params[constants.INIC_MAC]
2348
        if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2349
          macaddr = utils.NormalizeAndValidateMac(macaddr)
2350

    
2351
        if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
2352
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
2353
                                     " modifying an existing NIC",
2354
                                     errors.ECODE_INVAL)
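
    # Illustrative NIC parameter sets (hypothetical values) accepted by this
    # check:
    #   add:    {constants.INIC_IP: "pool", constants.INIC_NETWORK: "net1"}
    #   modify: {constants.INIC_MAC: "aa:00:00:35:d1:2e"}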
2355

    
2356
  def CheckArguments(self):
2357
    if not (self.op.nics or self.op.disks or self.op.disk_template or
2358
            self.op.hvparams or self.op.beparams or self.op.os_name or
2359
            self.op.osparams or self.op.offline is not None or
2360
            self.op.runtime_mem or self.op.pnode):
2361
      raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
2362

    
2363
    if self.op.hvparams:
2364
      CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
2365
                           "hypervisor", "instance", "cluster")
2366

    
2367
    self.op.disks = self._UpgradeDiskNicMods(
2368
      "disk", self.op.disks, opcodes.OpInstanceSetParams.TestDiskModifications)
2369
    self.op.nics = self._UpgradeDiskNicMods(
2370
      "NIC", self.op.nics, opcodes.OpInstanceSetParams.TestNicModifications)
2371

    
2372
    if self.op.disks and self.op.disk_template is not None:
2373
      raise errors.OpPrereqError("Disk template conversion and other disk"
2374
                                 " changes not supported at the same time",
2375
                                 errors.ECODE_INVAL)
2376

    
2377
    if (self.op.disk_template and
2378
        self.op.disk_template in constants.DTS_INT_MIRROR and
2379
        self.op.remote_node is None):
2380
      raise errors.OpPrereqError("Changing the disk template to a mirrored"
2381
                                 " one requires specifying a secondary node",
2382
                                 errors.ECODE_INVAL)
2383

    
2384
    # Check NIC modifications
2385
    self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
2386
                    self._VerifyNicModification)
2387

    
2388
    if self.op.pnode:
2389
      (self.op.pnode_uuid, self.op.pnode) = \
2390
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
2391

    
2392
  def ExpandNames(self):
2393
    self._ExpandAndLockInstance()
2394
    self.needed_locks[locking.LEVEL_NODEGROUP] = []
2395
    # Can't even acquire node locks in shared mode as upcoming changes in
2396
    # Ganeti 2.6 will start to modify the node object on disk conversion
2397
    self.needed_locks[locking.LEVEL_NODE] = []
2398
    self.needed_locks[locking.LEVEL_NODE_RES] = []
2399
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2400
    # Lock node group to look up the ipolicy
2401
    self.share_locks[locking.LEVEL_NODEGROUP] = 1
2402

    
2403
  def DeclareLocks(self, level):
2404
    if level == locking.LEVEL_NODEGROUP:
2405
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
2406
      # Acquire locks for the instance's nodegroups optimistically. Needs
2407
      # to be verified in CheckPrereq
2408
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
2409
        self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
2410
    elif level == locking.LEVEL_NODE:
2411
      self._LockInstancesNodes()
2412
      if self.op.disk_template and self.op.remote_node:
2413
        (self.op.remote_node_uuid, self.op.remote_node) = \
2414
          ExpandNodeUuidAndName(self.cfg, self.op.remote_node_uuid,
2415
                                self.op.remote_node)
2416
        self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node_uuid)
2417
    elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
2418
      # Copy node locks
2419
      self.needed_locks[locking.LEVEL_NODE_RES] = \
2420
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
2421

    
2422
  def BuildHooksEnv(self):
2423
    """Build hooks env.
2424

2425
    This runs on the master, primary and secondaries.
2426

2427
    """
2428
    args = {}
2429
    if constants.BE_MINMEM in self.be_new:
2430
      args["minmem"] = self.be_new[constants.BE_MINMEM]
2431
    if constants.BE_MAXMEM in self.be_new:
2432
      args["maxmem"] = self.be_new[constants.BE_MAXMEM]
2433
    if constants.BE_VCPUS in self.be_new:
2434
      args["vcpus"] = self.be_new[constants.BE_VCPUS]
2435
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
2436
    # information at all.
2437

    
2438
    if self._new_nics is not None:
2439
      nics = []
2440

    
2441
      for nic in self._new_nics:
2442
        n = copy.deepcopy(nic)
2443
        nicparams = self.cluster.SimpleFillNIC(n.nicparams)
2444
        n.nicparams = nicparams
2445
        nics.append(NICToTuple(self, n))
2446

    
2447
      args["nics"] = nics
2448

    
2449
    env = BuildInstanceHookEnvByObject(self, self.instance, override=args)
2450
    if self.op.disk_template:
2451
      env["NEW_DISK_TEMPLATE"] = self.op.disk_template
2452
    if self.op.runtime_mem:
2453
      env["RUNTIME_MEMORY"] = self.op.runtime_mem
2454

    
2455
    return env
2456

    
2457
  def BuildHooksNodes(self):
2458
    """Build hooks nodes.
2459

2460
    """
2461
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
2462
    return (nl, nl)
2463

    
2464
  def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
2465
                              old_params, cluster, pnode_uuid):
2466

    
2467
    update_params_dict = dict([(key, params[key])
                               for key in constants.NICS_PARAMETERS
                               if key in params])

    req_link = update_params_dict.get(constants.NIC_LINK, None)
    req_mode = update_params_dict.get(constants.NIC_MODE, None)

    new_net_uuid = None
    new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid)
    if new_net_uuid_or_name:
      new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name)
      new_net_obj = self.cfg.GetNetwork(new_net_uuid)

    if old_net_uuid:
      old_net_obj = self.cfg.GetNetwork(old_net_uuid)

    if new_net_uuid:
      netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode_uuid)
      if not netparams:
        raise errors.OpPrereqError("No netparams found for the network"
                                   " %s, probably not connected" %
                                   new_net_obj.name, errors.ECODE_INVAL)
      new_params = dict(netparams)
    else:
      new_params = GetUpdatedParams(old_params, update_params_dict)

    utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)

    new_filled_params = cluster.SimpleFillNIC(new_params)
    objects.NIC.CheckParameterSyntax(new_filled_params)

    new_mode = new_filled_params[constants.NIC_MODE]
    if new_mode == constants.NIC_MODE_BRIDGED:
      bridge = new_filled_params[constants.NIC_LINK]
      msg = self.rpc.call_bridges_exist(pnode_uuid, [bridge]).fail_msg
      if msg:
        msg = "Error checking bridges on node '%s': %s" % \
                (self.cfg.GetNodeName(pnode_uuid), msg)
        if self.op.force:
          self.warn.append(msg)
        else:
          raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)

    elif new_mode == constants.NIC_MODE_ROUTED:
      ip = params.get(constants.INIC_IP, old_ip)
      if ip is None:
        raise errors.OpPrereqError("Cannot set the NIC IP address to None"
                                   " on a routed NIC", errors.ECODE_INVAL)

    elif new_mode == constants.NIC_MODE_OVS:
      # TODO: check OVS link
      self.LogInfo("OVS links are currently not checked for correctness")

    if constants.INIC_MAC in params:
      mac = params[constants.INIC_MAC]
      if mac is None:
        raise errors.OpPrereqError("Cannot unset the NIC MAC address",
                                   errors.ECODE_INVAL)
      elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        # otherwise generate the MAC address
        params[constants.INIC_MAC] = \
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
      else:
        # or validate/reserve the current one
        try:
          self.cfg.ReserveMAC(mac, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("MAC address '%s' already in use"
                                     " in cluster" % mac,
                                     errors.ECODE_NOTUNIQUE)
    elif new_net_uuid != old_net_uuid:

      def get_net_prefix(net_uuid):
        mac_prefix = None
        if net_uuid:
          nobj = self.cfg.GetNetwork(net_uuid)
          mac_prefix = nobj.mac_prefix

        return mac_prefix

      new_prefix = get_net_prefix(new_net_uuid)
      old_prefix = get_net_prefix(old_net_uuid)
      if old_prefix != new_prefix:
        params[constants.INIC_MAC] = \
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())

    # if there is a change in (ip, network) tuple
    new_ip = params.get(constants.INIC_IP, old_ip)
    if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
      if new_ip:
        # if IP is pool then require a network and generate one IP
        if new_ip.lower() == constants.NIC_IP_POOL:
          if new_net_uuid:
            try:
              new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("Unable to get a free IP"
                                         " from the address pool",
                                         errors.ECODE_STATE)
            self.LogInfo("Chose IP %s from network %s",
                         new_ip,
                         new_net_obj.name)
            params[constants.INIC_IP] = new_ip
          else:
            raise errors.OpPrereqError("ip=pool, but no network found",
                                       errors.ECODE_INVAL)
        # Reserve new IP if in the new network if any
        elif new_net_uuid:
          try:
            self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId())
            self.LogInfo("Reserving IP %s in network %s",
                         new_ip, new_net_obj.name)
          except errors.ReservationError:
            raise errors.OpPrereqError("IP %s not available in network %s" %
                                       (new_ip, new_net_obj.name),
                                       errors.ECODE_NOTUNIQUE)
        # new network is None so check if new IP is a conflicting IP
        elif self.op.conflicts_check:
          _CheckForConflictingIp(self, new_ip, pnode_uuid)

      # release old IP if old network is not None
      if old_ip and old_net_uuid:
        try:
          self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId())
        except errors.AddressPoolError:
          logging.warning("Release IP %s not contained in network %s",
                          old_ip, old_net_obj.name)

    # there are no changes in (ip, network) tuple and old network is not None
    elif (old_net_uuid is not None and
          (req_link is not None or req_mode is not None)):
      raise errors.OpPrereqError("Not allowed to change link or mode of"
                                 " a NIC that is connected to a network",
                                 errors.ECODE_INVAL)

    private.params = new_params
    private.filled = new_filled_params

  def _PreCheckDiskTemplate(self, pnode_info):
    """CheckPrereq checks related to a new disk template."""
    # Arguments are passed to avoid configuration lookups
    pnode_uuid = self.instance.primary_node
    if self.instance.disk_template == self.op.disk_template:
      raise errors.OpPrereqError("Instance already has disk template %s" %
                                 self.instance.disk_template,
                                 errors.ECODE_INVAL)

    if not self.cluster.IsDiskTemplateEnabled(self.op.disk_template):
      raise errors.OpPrereqError("Disk template '%s' is not enabled for this"
                                 " cluster." % self.op.disk_template)

    if (self.instance.disk_template,
        self.op.disk_template) not in self._DISK_CONVERSIONS:
      raise errors.OpPrereqError("Unsupported disk template conversion from"
                                 " %s to %s" % (self.instance.disk_template,
                                                self.op.disk_template),
                                 errors.ECODE_INVAL)
    CheckInstanceState(self, self.instance, INSTANCE_DOWN,
                       msg="cannot change disk template")
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.remote_node_uuid == pnode_uuid:
        raise errors.OpPrereqError("Given new secondary node %s is the same"
                                   " as the primary node of the instance" %
                                   self.op.remote_node, errors.ECODE_STATE)
      CheckNodeOnline(self, self.op.remote_node_uuid)
      CheckNodeNotDrained(self, self.op.remote_node_uuid)
      # FIXME: here we assume that the old instance type is DT_PLAIN
      assert self.instance.disk_template == constants.DT_PLAIN
      disks = [{constants.IDISK_SIZE: d.size,
                constants.IDISK_VG: d.logical_id[0]}
               for d in self.instance.disks]
      required = ComputeDiskSizePerVG(self.op.disk_template, disks)
      CheckNodesFreeDiskPerVG(self, [self.op.remote_node_uuid], required)

      snode_info = self.cfg.GetNodeInfo(self.op.remote_node_uuid)
      snode_group = self.cfg.GetNodeGroup(snode_info.group)
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
                                                              snode_group)
      CheckTargetNodeIPolicy(self, ipolicy, self.instance, snode_info, self.cfg,
                             ignore=self.op.ignore_ipolicy)
      if pnode_info.group != snode_info.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    if not self.op.disk_template in constants.DTS_EXCL_STORAGE:
      # Make sure none of the nodes require exclusive storage
      nodes = [pnode_info]
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        assert snode_info
        nodes.append(snode_info)
      has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
      if compat.any(map(has_es, nodes)):
        errmsg = ("Cannot convert disk template from %s to %s when exclusive"
                  " storage is enabled" % (self.instance.disk_template,
                                           self.op.disk_template))
        raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)

  def _PreCheckDisks(self, ispec):
    """CheckPrereq checks related to disk changes.

    @type ispec: dict
    @param ispec: instance specs to be updated with the new disks

    """
    self.diskparams = self.cfg.GetInstanceDiskParams(self.instance)

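    # True when exclusive storage is enabled on at least one of the
    # instance's nodes; passed on to the per-modification checks below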
    excl_stor = compat.any(
      rpc.GetExclusiveStorageForNodes(self.cfg,
                                      self.instance.all_nodes).values()
      )

    # Check disk modifications. This is done here and not in CheckArguments
    # (as with NICs), because we need to know the instance's disk template
    ver_fn = lambda op, par: self._VerifyDiskModification(op, par, excl_stor)
    if self.instance.disk_template == constants.DT_EXT:
      self._CheckMods("disk", self.op.disks, {}, ver_fn)
    else:
      self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
                      ver_fn)

    self.diskmod = _PrepareContainerMods(self.op.disks, None)

    # Check the validity of the `provider' parameter
    if self.instance.disk_template in constants.DT_EXT:
      for mod in self.diskmod:
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
        if mod[0] == constants.DDM_ADD:
          if ext_provider is None:
            raise errors.OpPrereqError("Instance template is '%s' and parameter"
                                       " '%s' missing, during disk add" %
                                       (constants.DT_EXT,
                                        constants.IDISK_PROVIDER),
                                       errors.ECODE_NOENT)
        elif mod[0] == constants.DDM_MODIFY:
          if ext_provider:
            raise errors.OpPrereqError("Parameter '%s' is invalid during disk"
                                       " modification" %
                                       constants.IDISK_PROVIDER,
                                       errors.ECODE_INVAL)
    else:
      for mod in self.diskmod:
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
        if ext_provider is not None:
          raise errors.OpPrereqError("Parameter '%s' is only valid for"
                                     " instances of type '%s'" %
                                     (constants.IDISK_PROVIDER,
                                      constants.DT_EXT),
                                     errors.ECODE_INVAL)

    if self.op.disks and self.instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Disk operations not supported for"
                                 " diskless instances", errors.ECODE_INVAL)

    def _PrepareDiskMod(_, disk, params, __):
      disk.name = params.get(constants.IDISK_NAME, None)

    # Verify disk changes (operating on a copy)
    disks = copy.deepcopy(self.instance.disks)
    _ApplyContainerMods("disk", disks, None, self.diskmod, None,
                        _PrepareDiskMod, None)
    utils.ValidateDeviceNames("disk", disks)
    if len(disks) > constants.MAX_DISKS:
      raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
                                 " more" % constants.MAX_DISKS,
                                 errors.ECODE_STATE)
    disk_sizes = [disk.size for disk in self.instance.disks]
    disk_sizes.extend(params["size"] for (op, idx, params, private) in
                      self.diskmod if op == constants.DDM_ADD)
    ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
    ispec[constants.ISPEC_DISK_SIZE] = disk_sizes

    if self.op.offline is not None and self.op.offline:
      CheckInstanceState(self, self.instance, CAN_CHANGE_INSTANCE_OFFLINE,
                         msg="can't change to offline")

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the requested modifications are valid against the
    instance's current configuration and state, its nodes and the cluster.

    """
    assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    self.cluster = self.cfg.GetClusterInfo()

    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    pnode_uuid = self.instance.primary_node

    self.warn = []

    if (self.op.pnode_uuid is not None and self.op.pnode_uuid != pnode_uuid and
        not self.op.force):
      # verify that the instance is not up
      instance_info = self.rpc.call_instance_info(
          pnode_uuid, self.instance.name, self.instance.hypervisor,
          self.instance.hvparams)
      if instance_info.fail_msg:
        self.warn.append("Can't get instance runtime information: %s" %
                         instance_info.fail_msg)
      elif instance_info.payload:
        raise errors.OpPrereqError("Instance is still running on %s" %
                                   self.cfg.GetNodeName(pnode_uuid),
                                   errors.ECODE_STATE)

    assert pnode_uuid in self.owned_locks(locking.LEVEL_NODE)
    node_uuids = list(self.instance.all_nodes)
    pnode_info = self.cfg.GetNodeInfo(pnode_uuid)

    #_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
    assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
    group_info = self.cfg.GetNodeGroup(pnode_info.group)

    # dictionary with instance information after the modification
    ispec = {}

    # Prepare NIC modifications
    self.nicmod = _PrepareContainerMods(self.op.nics, _InstNicModPrivate)

    # OS change
    if self.op.os_name and not self.op.force:
      CheckNodeHasOS(self, self.instance.primary_node, self.op.os_name,
                     self.op.force_variant)
      instance_os = self.op.os_name
    else:
      instance_os = self.instance.os

    assert not (self.op.disk_template and self.op.disks), \
      "Can't modify disk template and apply disk changes at the same time"

    if self.op.disk_template:
      self._PreCheckDiskTemplate(pnode_info)

    self._PreCheckDisks(ispec)

    # hvparams processing
    if self.op.hvparams:
      hv_type = self.instance.hypervisor
      i_hvdict = GetUpdatedParams(self.instance.hvparams, self.op.hvparams)
      utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
      hv_new = self.cluster.SimpleFillHV(hv_type, self.instance.os, i_hvdict)

      # local check
      hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
      CheckHVParams(self, node_uuids, self.instance.hypervisor, hv_new)
      self.hv_proposed = self.hv_new = hv_new # the new actual values
      self.hv_inst = i_hvdict # the new dict (without defaults)
    else:
      self.hv_proposed = self.cluster.SimpleFillHV(self.instance.hypervisor,
                                                   self.instance.os,
                                                   self.instance.hvparams)
      self.hv_new = self.hv_inst = {}

    # beparams processing
    if self.op.beparams:
      i_bedict = GetUpdatedParams(self.instance.beparams, self.op.beparams,
                                  use_none=True)
      objects.UpgradeBeParams(i_bedict)
      utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
      be_new = self.cluster.SimpleFillBE(i_bedict)
      self.be_proposed = self.be_new = be_new # the new actual values
      self.be_inst = i_bedict # the new dict (without defaults)
    else:
      self.be_new = self.be_inst = {}
      self.be_proposed = self.cluster.SimpleFillBE(self.instance.beparams)
    be_old = self.cluster.FillBE(self.instance)

    # CPU param validation -- checking every time a parameter is
    # changed to cover all cases where either CPU mask or vcpus have
    # changed
    if (constants.BE_VCPUS in self.be_proposed and
        constants.HV_CPU_MASK in self.hv_proposed):
      cpu_list = \
        utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
      # Verify mask is consistent with number of vCPUs. Can skip this
      # test if only 1 entry in the CPU mask, which means same mask
      # is applied to all vCPUs.
      if (len(cpu_list) > 1 and
          len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
        raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
                                   " CPU mask [%s]" %
                                   (self.be_proposed[constants.BE_VCPUS],
                                    self.hv_proposed[constants.HV_CPU_MASK]),
                                   errors.ECODE_INVAL)

      # Only perform this test if a new CPU mask is given
      if constants.HV_CPU_MASK in self.hv_new:
        # Calculate the largest CPU number requested
        max_requested_cpu = max(map(max, cpu_list))
        # Check that all of the instance's nodes have enough physical CPUs to
        # satisfy the requested CPU mask
        hvspecs = [(self.instance.hypervisor,
                    self.cfg.GetClusterInfo()
                      .hvparams[self.instance.hypervisor])]
        _CheckNodesPhysicalCPUs(self, self.instance.all_nodes,
                                max_requested_cpu + 1,
                                hvspecs)

    # osparams processing
    if self.op.osparams:
      i_osdict = GetUpdatedParams(self.instance.osparams, self.op.osparams)
      CheckOSParams(self, True, node_uuids, instance_os, i_osdict)
      self.os_inst = i_osdict # the new dict (without defaults)
    else:
      self.os_inst = {}

    #TODO(dynmem): do the appropriate check involving MINMEM
    if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
        be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
      mem_check_list = [pnode_uuid]
      if be_new[constants.BE_AUTO_BALANCE]:
        # either we changed auto_balance to yes or it was from before
        mem_check_list.extend(self.instance.secondary_nodes)
      instance_info = self.rpc.call_instance_info(
          pnode_uuid, self.instance.name, self.instance.hypervisor,
          self.instance.hvparams)
      hvspecs = [(self.instance.hypervisor,
                  self.cluster.hvparams[self.instance.hypervisor])]
      nodeinfo = self.rpc.call_node_info(mem_check_list, None,
                                         hvspecs)
      pninfo = nodeinfo[pnode_uuid]
      msg = pninfo.fail_msg
      if msg:
        # Assume the primary node is unreachable and go ahead
        self.warn.append("Can't get info from primary node %s: %s" %
                         (self.cfg.GetNodeName(pnode_uuid), msg))
      else:
        (_, _, (pnhvinfo, )) = pninfo.payload
        if not isinstance(pnhvinfo.get("memory_free", None), int):
          self.warn.append("Node data from primary node %s doesn't contain"
                           " free memory information" %
                           self.cfg.GetNodeName(pnode_uuid))
        elif instance_info.fail_msg:
          self.warn.append("Can't get instance runtime information: %s" %
                           instance_info.fail_msg)
        else:
          if instance_info.payload:
            current_mem = int(instance_info.payload["memory"])
          else:
            # Assume instance not running
            # (there is a slight race condition here, but it's not very
            # probable, and we have no other way to check)
            # TODO: Describe race condition
            current_mem = 0
          #TODO(dynmem): do the appropriate check involving MINMEM
          miss_mem = (be_new[constants.BE_MAXMEM] - current_mem -
                      pnhvinfo["memory_free"])
          if miss_mem > 0:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from starting, due to %d MB of memory"
                                       " missing on its primary node" %
                                       miss_mem, errors.ECODE_NORES)

      if be_new[constants.BE_AUTO_BALANCE]:
        for node_uuid, nres in nodeinfo.items():
          if node_uuid not in self.instance.secondary_nodes:
            continue
          nres.Raise("Can't get info from secondary node %s" %
                     self.cfg.GetNodeName(node_uuid), prereq=True,
                     ecode=errors.ECODE_STATE)
          (_, _, (nhvinfo, )) = nres.payload
          if not isinstance(nhvinfo.get("memory_free", None), int):
            raise errors.OpPrereqError("Secondary node %s didn't return free"
                                       " memory information" %
                                       self.cfg.GetNodeName(node_uuid),
                                       errors.ECODE_STATE)
          #TODO(dynmem): do the appropriate check involving MINMEM
          elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from failover to its secondary node"
                                       " %s, due to not enough memory" %
                                       self.cfg.GetNodeName(node_uuid),
                                       errors.ECODE_STATE)

    if self.op.runtime_mem:
      remote_info = self.rpc.call_instance_info(
         self.instance.primary_node, self.instance.name,
         self.instance.hypervisor,
         self.cluster.hvparams[self.instance.hypervisor])
      remote_info.Raise("Error checking node %s" %
                        self.cfg.GetNodeName(self.instance.primary_node))
      if not remote_info.payload: # not running already
        raise errors.OpPrereqError("Instance %s is not running" %
                                   self.instance.name, errors.ECODE_STATE)

      current_memory = remote_info.payload["memory"]
      if (not self.op.force and
           (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
            self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
        raise errors.OpPrereqError("Instance %s must have memory between %d"
                                   " and %d MB of memory unless --force is"
                                   " given" %
                                   (self.instance.name,
                                    self.be_proposed[constants.BE_MINMEM],
                                    self.be_proposed[constants.BE_MAXMEM]),
                                   errors.ECODE_INVAL)

      delta = self.op.runtime_mem - current_memory
      if delta > 0:
        CheckNodeFreeMemory(
            self, self.instance.primary_node,
            "ballooning memory for instance %s" % self.instance.name, delta,
            self.instance.hypervisor,
            self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])

    # make self.cluster visible in the functions below
    cluster = self.cluster

    def _PrepareNicCreate(_, params, private):
      self._PrepareNicModification(params, private, None, None,
                                   {}, cluster, pnode_uuid)
      return (None, None)

    def _PrepareNicMod(_, nic, params, private):
      self._PrepareNicModification(params, private, nic.ip, nic.network,
                                   nic.nicparams, cluster, pnode_uuid)
      return None

    def _PrepareNicRemove(_, params, __):
      ip = params.ip
      net = params.network
      if net is not None and ip is not None:
        self.cfg.ReleaseIp(net, ip, self.proc.GetECId())

    # Verify NIC changes (operating on copy)
    nics = self.instance.nics[:]
    _ApplyContainerMods("NIC", nics, None, self.nicmod,
                        _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
    if len(nics) > constants.MAX_NICS:
      raise errors.OpPrereqError("Instance has too many network interfaces"
                                 " (%d), cannot add more" % constants.MAX_NICS,
                                 errors.ECODE_STATE)

    # Pre-compute NIC changes (necessary to use result in hooks)
    self._nic_chgdesc = []
    if self.nicmod:
      # Operate on copies as this is still in prereq
      nics = [nic.Copy() for nic in self.instance.nics]
      _ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
                          self._CreateNewNic, self._ApplyNicMods, None)
      # Verify that NIC names are unique and valid
      utils.ValidateDeviceNames("NIC", nics)
      self._new_nics = nics
      ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
    else:
      self._new_nics = None
      ispec[constants.ISPEC_NIC_COUNT] = len(self.instance.nics)

    if not self.op.ignore_ipolicy:
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
                                                              group_info)

      # Fill ispec with backend parameters
      ispec[constants.ISPEC_SPINDLE_USE] = \
        self.be_new.get(constants.BE_SPINDLE_USE, None)
      ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS,
                                                         None)

      # Copy ispec to verify parameters with min/max values separately
      if self.op.disk_template:
        new_disk_template = self.op.disk_template
      else:
        new_disk_template = self.instance.disk_template
      ispec_max = ispec.copy()
      ispec_max[constants.ISPEC_MEM_SIZE] = \
        self.be_new.get(constants.BE_MAXMEM, None)
      res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,
                                                     new_disk_template)
      ispec_min = ispec.copy()
      ispec_min[constants.ISPEC_MEM_SIZE] = \
        self.be_new.get(constants.BE_MINMEM, None)
      res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,
                                                     new_disk_template)

      if (res_max or res_min):
        # FIXME: Improve error message by including information about whether
        # the upper or lower limit of the parameter fails the ipolicy.
        msg = ("Instance allocation to group %s (%s) violates policy: %s" %
               (group_info, group_info.name,
                utils.CommaJoin(set(res_max + res_min))))
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

  def _ConvertPlainToDrbd(self, feedback_fn):
    """Converts an instance from plain to drbd.

    """
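    # Outline of the conversion below: generate the new DRBD disk objects,
    # create the missing data and meta volumes on both nodes, rename the
    # existing plain LVs so they become the DRBD data devices, create the
    # DRBD devices on top of them, update the configuration and finally
    # wait for the disks to sync.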
    feedback_fn("Converting template to drbd")
3057
    pnode_uuid = self.instance.primary_node
3058
    snode_uuid = self.op.remote_node_uuid
3059

    
3060
    assert self.instance.disk_template == constants.DT_PLAIN
3061

    
3062
    # create a fake disk info for _GenerateDiskTemplate
3063
    disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
3064
                  constants.IDISK_VG: d.logical_id[0],
3065
                  constants.IDISK_NAME: d.name}
3066
                 for d in self.instance.disks]
3067
    new_disks = GenerateDiskTemplate(self, self.op.disk_template,
3068
                                     self.instance.uuid, pnode_uuid,
3069
                                     [snode_uuid], disk_info, None, None, 0,
3070
                                     feedback_fn, self.diskparams)
3071
    anno_disks = rpc.AnnotateDiskParams(constants.DT_DRBD8, new_disks,
3072
                                        self.diskparams)
3073
    p_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, pnode_uuid)
3074
    s_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, snode_uuid)
3075
    info = GetInstanceInfoText(self.instance)
3076
    feedback_fn("Creating additional volumes...")
3077
    # first, create the missing data and meta devices
3078
    for disk in anno_disks:
3079
      # unfortunately this is... not too nice
3080
      CreateSingleBlockDev(self, pnode_uuid, self.instance, disk.children[1],
3081
                           info, True, p_excl_stor)
3082
      for child in disk.children:
3083
        CreateSingleBlockDev(self, snode_uuid, self.instance, child, info, True,
3084
                             s_excl_stor)
3085
    # at this stage, all new LVs have been created, we can rename the
3086
    # old ones
3087
    feedback_fn("Renaming original volumes...")
3088
    rename_list = [(o, n.children[0].logical_id)
3089
                   for (o, n) in zip(self.instance.disks, new_disks)]
3090
    result = self.rpc.call_blockdev_rename(pnode_uuid, rename_list)
3091
    result.Raise("Failed to rename original LVs")
3092

    
3093
    feedback_fn("Initializing DRBD devices...")
3094
    # all child devices are in place, we can now create the DRBD devices
3095
    try:
3096
      for disk in anno_disks:
3097
        for (node_uuid, excl_stor) in [(pnode_uuid, p_excl_stor),
3098
                                       (snode_uuid, s_excl_stor)]:
3099
          f_create = node_uuid == pnode_uuid
3100
          CreateSingleBlockDev(self, node_uuid, self.instance, disk, info,
3101
                               f_create, excl_stor)
3102
    except errors.GenericError, e:
3103
      feedback_fn("Initializing of DRBD devices failed;"
3104
                  " renaming back original volumes...")
3105
      for disk in new_disks:
3106
        self.cfg.SetDiskID(disk, pnode_uuid)
3107
      rename_back_list = [(n.children[0], o.logical_id)
3108
                          for (n, o) in zip(new_disks, self.instance.disks)]
3109
      result = self.rpc.call_blockdev_rename(pnode_uuid, rename_back_list)
3110
      result.Raise("Failed to rename LVs back after error %s" % str(e))
3111
      raise
3112

    
3113
    # at this point, the instance has been modified
3114
    self.instance.disk_template = constants.DT_DRBD8
3115
    self.instance.disks = new_disks
3116
    self.cfg.Update(self.instance, feedback_fn)
3117

    
3118
    # Release node locks while waiting for sync
3119
    ReleaseLocks(self, locking.LEVEL_NODE)
3120

    
3121
    # disks are created, waiting for sync
3122
    disk_abort = not WaitForSync(self, self.instance,
3123
                                 oneshot=not self.op.wait_for_sync)
3124
    if disk_abort:
3125
      raise errors.OpExecError("There are some degraded disks for"
3126
                               " this instance, please cleanup manually")
3127

    
3128
    # Node resource locks will be released by caller
3129

    
  def _ConvertDrbdToPlain(self, feedback_fn):
    """Converts an instance from drbd to plain.

    """
    assert len(self.instance.secondary_nodes) == 1
    assert self.instance.disk_template == constants.DT_DRBD8

    pnode_uuid = self.instance.primary_node
    snode_uuid = self.instance.secondary_nodes[0]
    feedback_fn("Converting template to plain")

    old_disks = AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
    new_disks = [d.children[0] for d in self.instance.disks]

    # copy over size, mode and name
    for parent, child in zip(old_disks, new_disks):
      child.size = parent.size
      child.mode = parent.mode
      child.name = parent.name

    # this is a DRBD disk, return its port to the pool
    # NOTE: this must be done right before the call to cfg.Update!
    for disk in old_disks:
      tcp_port = disk.logical_id[2]
      self.cfg.AddTcpUdpPort(tcp_port)

    # update instance structure
    self.instance.disks = new_disks
    self.instance.disk_template = constants.DT_PLAIN
    _UpdateIvNames(0, self.instance.disks)
    self.cfg.Update(self.instance, feedback_fn)

    # Release locks in case removing disks takes a while
    ReleaseLocks(self, locking.LEVEL_NODE)

    feedback_fn("Removing volumes on the secondary node...")
    for disk in old_disks:
      self.cfg.SetDiskID(disk, snode_uuid)
      msg = self.rpc.call_blockdev_remove(snode_uuid, disk).fail_msg
      if msg:
        self.LogWarning("Could not remove block device %s on node %s,"
                        " continuing anyway: %s", disk.iv_name,
                        self.cfg.GetNodeName(snode_uuid), msg)

    feedback_fn("Removing unneeded volumes on the primary node...")
    for idx, disk in enumerate(old_disks):
      meta = disk.children[1]
      self.cfg.SetDiskID(meta, pnode_uuid)
      msg = self.rpc.call_blockdev_remove(pnode_uuid, meta).fail_msg
      if msg:
        self.LogWarning("Could not remove metadata for disk %d on node %s,"
                        " continuing anyway: %s", idx,
                        self.cfg.GetNodeName(pnode_uuid), msg)

  def _CreateNewDisk(self, idx, params, _):
    """Creates a new disk.

    """
    # add a new disk
    if self.instance.disk_template in constants.DTS_FILEBASED:
      (file_driver, file_path) = self.instance.disks[0].logical_id
      file_path = os.path.dirname(file_path)
    else:
      file_driver = file_path = None

    disk = \
      GenerateDiskTemplate(self, self.instance.disk_template,
                           self.instance.uuid, self.instance.primary_node,
                           self.instance.secondary_nodes, [params], file_path,
                           file_driver, idx, self.Log, self.diskparams)[0]

    new_disks = CreateDisks(self, self.instance, disks=[disk])

    if self.cluster.prealloc_wipe_disks:
      # Wipe new disk
      WipeOrCleanupDisks(self, self.instance,
                         disks=[(idx, disk, 0)],
                         cleanup=new_disks)

    return (disk, [
      ("disk/%d" % idx, "add:size=%s,mode=%s" % (disk.size, disk.mode)),
      ])

  @staticmethod
  def _ModifyDisk(idx, disk, params, _):
    """Modifies a disk.

    """
    changes = []
    mode = params.get(constants.IDISK_MODE, None)
    if mode:
      disk.mode = mode
      changes.append(("disk.mode/%d" % idx, disk.mode))

    name = params.get(constants.IDISK_NAME, None)
    disk.name = name
    changes.append(("disk.name/%d" % idx, disk.name))

    return changes

  def _RemoveDisk(self, idx, root, _):
    """Removes a disk.

    """
    (anno_disk,) = AnnotateDiskParams(self.instance, [root], self.cfg)
    for node_uuid, disk in anno_disk.ComputeNodeTree(
                             self.instance.primary_node):
      self.cfg.SetDiskID(disk, node_uuid)
      msg = self.rpc.call_blockdev_remove(node_uuid, disk).fail_msg
      if msg:
        self.LogWarning("Could not remove disk/%d on node '%s': %s,"
                        " continuing anyway", idx,
                        self.cfg.GetNodeName(node_uuid), msg)

    # if this is a DRBD disk, return its port to the pool
    if root.dev_type in constants.LDS_DRBD:
      self.cfg.AddTcpUdpPort(root.logical_id[2])

  def _CreateNewNic(self, idx, params, private):
    """Creates data structure for a new network interface.

    """
    mac = params[constants.INIC_MAC]
    ip = params.get(constants.INIC_IP, None)
    net = params.get(constants.INIC_NETWORK, None)
    name = params.get(constants.INIC_NAME, None)
    net_uuid = self.cfg.LookupNetwork(net)
    #TODO: not private.filled?? can a nic have no nicparams??
    nicparams = private.filled
    nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name,
                       nicparams=nicparams)
    nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())

    return (nobj, [
      ("nic.%d" % idx,
       "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
       (mac, ip, private.filled[constants.NIC_MODE],
       private.filled[constants.NIC_LINK],
       net)),
      ])

  def _ApplyNicMods(self, idx, nic, params, private):
    """Modifies a network interface.

    """
    changes = []

    for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:
      if key in params:
        changes.append(("nic.%s/%d" % (key, idx), params[key]))
        setattr(nic, key, params[key])

    new_net = params.get(constants.INIC_NETWORK, nic.network)
    new_net_uuid = self.cfg.LookupNetwork(new_net)
    if new_net_uuid != nic.network:
      changes.append(("nic.network/%d" % idx, new_net))
      nic.network = new_net_uuid

    if private.filled:
      nic.nicparams = private.filled

      for (key, val) in nic.nicparams.items():
        changes.append(("nic.%s/%d" % (key, idx), val))

    return changes

  def Exec(self, feedback_fn):
    """Modifies an instance.

    All parameters take effect only at the next restart of the instance.

    """
    # Process here the warnings from CheckPrereq, as we don't have a
    # feedback_fn there.
    # TODO: Replace with self.LogWarning
    for warn in self.warn:
      feedback_fn("WARNING: %s" % warn)

    assert ((self.op.disk_template is None) ^
            bool(self.owned_locks(locking.LEVEL_NODE_RES))), \
      "Not owning any node resource locks"

    result = []

    # New primary node
    if self.op.pnode_uuid:
      self.instance.primary_node = self.op.pnode_uuid

    # runtime memory
    if self.op.runtime_mem:
      rpcres = self.rpc.call_instance_balloon_memory(self.instance.primary_node,
                                                     self.instance,
                                                     self.op.runtime_mem)
      rpcres.Raise("Cannot modify instance runtime memory")
      result.append(("runtime_memory", self.op.runtime_mem))

    # Apply disk changes
    _ApplyContainerMods("disk", self.instance.disks, result, self.diskmod,
                        self._CreateNewDisk, self._ModifyDisk,
                        self._RemoveDisk)
    _UpdateIvNames(0, self.instance.disks)

    if self.op.disk_template:
      if __debug__:
        check_nodes = set(self.instance.all_nodes)
        if self.op.remote_node_uuid:
          check_nodes.add(self.op.remote_node_uuid)
        for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
          owned = self.owned_locks(level)
          assert not (check_nodes - owned), \
            ("Not owning the correct locks, owning %r, expected at least %r" %
             (owned, check_nodes))

      r_shut = ShutdownInstanceDisks(self, self.instance)
      if not r_shut:
        raise errors.OpExecError("Cannot shutdown instance disks, unable to"
                                 " proceed with disk template conversion")
      mode = (self.instance.disk_template, self.op.disk_template)
      try:
        self._DISK_CONVERSIONS[mode](self, feedback_fn)
      except:
        self.cfg.ReleaseDRBDMinors(self.instance.uuid)
        raise
      result.append(("disk_template", self.op.disk_template))

      assert self.instance.disk_template == self.op.disk_template, \
        ("Expected disk template '%s', found '%s'" %
         (self.op.disk_template, self.instance.disk_template))

    # Release node and resource locks if there are any (they might already have
    # been released during disk conversion)
    ReleaseLocks(self, locking.LEVEL_NODE)
    ReleaseLocks(self, locking.LEVEL_NODE_RES)

    # Apply NIC changes
    if self._new_nics is not None:
      self.instance.nics = self._new_nics
      result.extend(self._nic_chgdesc)

    # hvparams changes
    if self.op.hvparams:
      self.instance.hvparams = self.hv_inst
      for key, val in self.op.hvparams.iteritems():
        result.append(("hv/%s" % key, val))

    # beparams changes
    if self.op.beparams:
      self.instance.beparams = self.be_inst
      for key, val in self.op.beparams.iteritems():
        result.append(("be/%s" % key, val))

    # OS change
    if self.op.os_name:
      self.instance.os = self.op.os_name

    # osparams changes
    if self.op.osparams:
      self.instance.osparams = self.os_inst
      for key, val in self.op.osparams.iteritems():
        result.append(("os/%s" % key, val))

    if self.op.offline is None:
      # Ignore
      pass
    elif self.op.offline:
      # Mark instance as offline
      self.cfg.MarkInstanceOffline(self.instance.uuid)
      result.append(("admin_state", constants.ADMINST_OFFLINE))
    else:
      # Mark instance as online, but stopped
      self.cfg.MarkInstanceDown(self.instance.uuid)
      result.append(("admin_state", constants.ADMINST_DOWN))

    self.cfg.Update(self.instance, feedback_fn, self.proc.GetECId())

    assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
                self.owned_locks(locking.LEVEL_NODE)), \
      "All node locks should have been released by now"

    return result

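  # Supported disk template conversions, mapping (current, requested)
  # template pairs to the helper method that performs the conversion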
  _DISK_CONVERSIONS = {
    (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
    (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
    }


class LUInstanceChangeGroup(LogicalUnit):
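  """Moves an instance to another node group.

  The moves themselves are computed by the configured iallocator and
  returned as follow-up jobs.

  """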
  HPATH = "instance-change-group"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self.share_locks = ShareAll()

    self.needed_locks = {
      locking.LEVEL_NODEGROUP: [],
      locking.LEVEL_NODE: [],
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
      }

    self._ExpandAndLockInstance()

    if self.op.target_groups:
      self.req_target_uuids = map(self.cfg.LookupNodeGroup,
                                  self.op.target_groups)
    else:
      self.req_target_uuids = None

    self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]

      if self.req_target_uuids:
        lock_groups = set(self.req_target_uuids)

        # Lock all groups used by instance optimistically; this requires going
        # via the node before it's locked, requiring verification later on
        instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
        lock_groups.update(instance_groups)
      else:
        # No target groups, need to lock all of them
        lock_groups = locking.ALL_SET

      self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups

    elif level == locking.LEVEL_NODE:
      if self.req_target_uuids:
        # Lock all nodes used by instances
        self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
        self._LockInstancesNodes()

        # Lock all nodes in all potential target groups
        lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
                       self.cfg.GetInstanceNodeGroups(self.op.instance_uuid))
        member_nodes = [node_uuid
                        for group in lock_groups
                        for node_uuid in self.cfg.GetNodeGroup(group).members]
        self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
      else:
        # Lock all nodes as all groups are potential targets
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    owned_instance_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))

    assert (self.req_target_uuids is None or
            owned_groups.issuperset(self.req_target_uuids))
    assert owned_instance_names == set([self.op.instance_name])

    # Get instance information
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)

    # Check if node groups for locked instance are still correct
    assert owned_nodes.issuperset(self.instance.all_nodes), \
      ("Instance %s's nodes changed while we kept the lock" %
       self.op.instance_name)

    inst_groups = CheckInstanceNodeGroups(self.cfg, self.op.instance_uuid,
                                          owned_groups)

    if self.req_target_uuids:
      # User requested specific target groups
      self.target_uuids = frozenset(self.req_target_uuids)
    else:
      # All groups except those used by the instance are potential targets
      self.target_uuids = owned_groups - inst_groups

    conflicting_groups = self.target_uuids & inst_groups
    if conflicting_groups:
      raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
                                 " used by the instance '%s'" %
                                 (utils.CommaJoin(conflicting_groups),
                                  self.op.instance_name),
                                 errors.ECODE_INVAL)

    if not self.target_uuids:
      raise errors.OpPrereqError("There are no possible target groups",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    assert self.target_uuids

    env = {
      "TARGET_GROUPS": " ".join(self.target_uuids),
      }

    env.update(BuildInstanceHookEnvByObject(self, self.instance))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()
    return ([mn], [mn])

  def Exec(self, feedback_fn):
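    """Run the iallocator and return the resulting group-change jobs.

    """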
    instances = list(self.owned_locks(locking.LEVEL_INSTANCE))

    assert instances == [self.op.instance_name], "Instance not locked"

    req = iallocator.IAReqGroupChange(instances=instances,
                                      target_groups=list(self.target_uuids))
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute solution for changing group of"
                                 " instance '%s' using iallocator '%s': %s" %
                                 (self.op.instance_name, self.op.iallocator,
                                  ial.info), errors.ECODE_NORES)

    jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False)

    self.LogInfo("Iallocator returned %s job(s) for changing group of"
                 " instance '%s'", len(jobs), self.op.instance_name)

    return ResultWithJobs(jobs)