#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Logical units dealing with instances."""

import OpenSSL
import copy
import logging
import os

from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import ht
from ganeti import hypervisor
from ganeti import locking
from ganeti.masterd import iallocator
from ganeti import masterd
from ganeti import netutils
from ganeti import objects
from ganeti import opcodes
from ganeti import pathutils
from ganeti import rpc
from ganeti import utils

from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs

from ganeti.cmdlib.common import INSTANCE_DOWN, \
  INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, CheckNodeOnline, \
  ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
  LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
  IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
  AnnotateDiskParams, GetUpdatedParams, ExpandInstanceUuidAndName, \
  ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeUuidAndName, \
  CheckDiskTemplateEnabled
from ganeti.cmdlib.instance_storage import CreateDisks, \
  CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, WaitForSync, \
  IsExclusiveStorageEnabledNodeUuid, CreateSingleBlockDev, ComputeDisks, \
  CheckRADOSFreeSpace, ComputeDiskSizePerVG, GenerateDiskTemplate, \
  StartInstanceDisks, ShutdownInstanceDisks, AssembleInstanceDisks, \
  CheckSpindlesExclusiveStorage
from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
  GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
  NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
  ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
  GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
  CheckInstanceBridgesExist, CheckNicsBridgesExist, CheckNodeHasOS

import ganeti.masterd.instance


#: Type description for changes as returned by L{_ApplyContainerMods}'s
#: callbacks
_TApplyContModsCbChanges = \
  ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
    ht.TNonEmptyString,
    ht.TAny,
    ])))
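# Illustrative only: a value matching this type is either None or a list of
# two-element (name, value) tuples, e.g. [("mode", "bridged"), ("link", "br0")].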


def _CheckHostnameSane(lu, name):
  """Ensures that a given hostname resolves to a 'sane' name.

  The given name is required to be a prefix of the resolved hostname,
  to prevent accidental mismatches.

  @param lu: the logical unit on behalf of which we're checking
  @param name: the name we should resolve and check
  @return: the resolved hostname object

  """
  hostname = netutils.GetHostname(name=name)
  if hostname.name != name:
    lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name)
  if not utils.MatchNameComponent(name, [hostname.name]):
    raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
                                " same as given hostname '%s'") %
                               (hostname.name, name), errors.ECODE_INVAL)
  return hostname


def _CheckOpportunisticLocking(op):
  """Generate error if opportunistic locking is not possible.

  """
  if op.opportunistic_locking and not op.iallocator:
    raise errors.OpPrereqError("Opportunistic locking is only available in"
                               " combination with an instance allocator",
                               errors.ECODE_INVAL)


def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_name_whitelist):
  """Wrapper around IAReqInstanceAlloc.

  @param op: The instance opcode
  @param disks: The computed disks
  @param nics: The computed nics
  @param beparams: The fully filled beparams
  @param node_name_whitelist: List of nodes which should appear as online to the
    allocator (unless the node is already marked offline)

  @returns: A filled L{iallocator.IAReqInstanceAlloc}

  """
  spindle_use = beparams[constants.BE_SPINDLE_USE]
  return iallocator.IAReqInstanceAlloc(name=op.instance_name,
                                       disk_template=op.disk_template,
                                       tags=op.tags,
                                       os=op.os_type,
                                       vcpus=beparams[constants.BE_VCPUS],
                                       memory=beparams[constants.BE_MAXMEM],
                                       spindle_use=spindle_use,
                                       disks=disks,
                                       nics=[n.ToDict() for n in nics],
                                       hypervisor=op.hypervisor,
                                       node_whitelist=node_name_whitelist)


def _ComputeFullBeParams(op, cluster):
  """Computes the full beparams.

  @param op: The instance opcode
  @param cluster: The cluster config object

  @return: The fully filled beparams

  """
  default_beparams = cluster.beparams[constants.PP_DEFAULT]
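  # Replace "auto" values with the cluster defaults before type checking and
  # filling, e.g. a "vcpus" of "auto" becomes the cluster's default vcpus value.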
  for param, value in op.beparams.iteritems():
    if value == constants.VALUE_AUTO:
      op.beparams[param] = default_beparams[param]
  objects.UpgradeBeParams(op.beparams)
  utils.ForceDictType(op.beparams, constants.BES_PARAMETER_TYPES)
  return cluster.SimpleFillBE(op.beparams)


def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
  """Computes the nics.

  @param op: The instance opcode
  @param cluster: Cluster configuration object
  @param default_ip: The default ip to assign
  @param cfg: An instance of the configuration object
  @param ec_id: Execution context ID

  @returns: The built up nics

  """
  nics = []
  for nic in op.nics:
    nic_mode_req = nic.get(constants.INIC_MODE, None)
    nic_mode = nic_mode_req
    if nic_mode is None or nic_mode == constants.VALUE_AUTO:
      nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]
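    # nic_mode_req keeps what the user actually requested (possibly None),
    # while nic_mode is the effective mode after applying the cluster default.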

    net = nic.get(constants.INIC_NETWORK, None)
    link = nic.get(constants.NIC_LINK, None)
    ip = nic.get(constants.INIC_IP, None)

    if net is None or net.lower() == constants.VALUE_NONE:
      net = None
    else:
      if nic_mode_req is not None or link is not None:
        raise errors.OpPrereqError("If network is given, no mode or link"
                                   " is allowed to be passed",
                                   errors.ECODE_INVAL)

    # ip validity checks
    if ip is None or ip.lower() == constants.VALUE_NONE:
      nic_ip = None
    elif ip.lower() == constants.VALUE_AUTO:
      if not op.name_check:
        raise errors.OpPrereqError("IP address set to auto but name checks"
                                   " have been skipped",
                                   errors.ECODE_INVAL)
      nic_ip = default_ip
    else:
      # We defer pool operations until later, so that the iallocator has
      # filled in the instance's node(s)
      if ip.lower() == constants.NIC_IP_POOL:
        if net is None:
          raise errors.OpPrereqError("if ip=pool, parameter network"
                                     " must be passed too",
                                     errors.ECODE_INVAL)

      elif not netutils.IPAddress.IsValid(ip):
        raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                   errors.ECODE_INVAL)

      nic_ip = ip

    # TODO: check the ip address for uniqueness
    if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
      raise errors.OpPrereqError("Routed nic mode requires an ip address",
                                 errors.ECODE_INVAL)

    # MAC address verification
    mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
    if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
      mac = utils.NormalizeAndValidateMac(mac)

      try:
        # TODO: We need to factor this out
        cfg.ReserveMAC(mac, ec_id)
      except errors.ReservationError:
        raise errors.OpPrereqError("MAC address %s already in use"
                                   " in cluster" % mac,
                                   errors.ECODE_NOTUNIQUE)

    #  Build nic parameters
    nicparams = {}
    if nic_mode_req:
      nicparams[constants.NIC_MODE] = nic_mode
    if link:
      nicparams[constants.NIC_LINK] = link

    check_params = cluster.SimpleFillNIC(nicparams)
    objects.NIC.CheckParameterSyntax(check_params)
    net_uuid = cfg.LookupNetwork(net)
    name = nic.get(constants.INIC_NAME, None)
    if name is not None and name.lower() == constants.VALUE_NONE:
      name = None
    nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name,
                          network=net_uuid, nicparams=nicparams)
    nic_obj.uuid = cfg.GenerateUniqueID(ec_id)
    nics.append(nic_obj)

  return nics


def _CheckForConflictingIp(lu, ip, node_uuid):
  """Raise an error in case of a conflicting IP address.

  @type ip: string
  @param ip: IP address
  @type node_uuid: string
  @param node_uuid: node UUID

  """
  (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node_uuid)
  if conf_net is not None:
    raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
                                " network %s, but the target NIC does not." %
                                (ip, conf_net)),
                               errors.ECODE_STATE)

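  # No conflict was found; the return value is ignored by the caller in this
  # module.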
  return (None, None)


def _ComputeIPolicyInstanceSpecViolation(
  ipolicy, instance_spec, disk_template,
  _compute_fn=ComputeIPolicySpecViolation):
  """Compute whether the instance specs meet the specs of the ipolicy.

  @type ipolicy: dict
  @param ipolicy: The ipolicy to verify against
  @type instance_spec: dict
  @param instance_spec: The instance spec to verify
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{ComputeIPolicySpecViolation}

  """
  mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
  cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
  disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
  disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
  nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
  spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)

  return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
                     disk_sizes, spindle_use, disk_template)


def _CheckOSVariant(os_obj, name):
  """Check whether an OS name conforms to the os variants specification.

  @type os_obj: L{objects.OS}
  @param os_obj: OS object to check
  @type name: string
  @param name: OS name passed by the user, to check for validity

  """
  variant = objects.OS.GetVariant(name)
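  # An OS name of the form "name+variant" (e.g. "debootstrap+default") yields
  # the variant part; a plain name yields an empty variant.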
  if not os_obj.supported_variants:
    if variant:
      raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
                                 " passed)" % (os_obj.name, variant),
                                 errors.ECODE_INVAL)
    return
  if not variant:
    raise errors.OpPrereqError("OS name must include a variant",
                               errors.ECODE_INVAL)

  if variant not in os_obj.supported_variants:
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)


class LUInstanceCreate(LogicalUnit):
  """Create an instance.

  """
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def _CheckDiskTemplateValid(self):
    """Checks validity of disk template.

    """
    cluster = self.cfg.GetClusterInfo()
    if self.op.disk_template is None:
      # FIXME: It would be better to take the default disk template from the
      # ipolicy, but for the ipolicy we need the primary node, which we get from
      # the iallocator, which wants the disk template as input. To solve this
      # chicken-and-egg problem, it should be possible to specify just a node
      # group from the iallocator and take the ipolicy from that.
      self.op.disk_template = cluster.enabled_disk_templates[0]
    CheckDiskTemplateEnabled(cluster, self.op.disk_template)

  def _CheckDiskArguments(self):
    """Checks validity of disk-related arguments.

    """
    # check that disk's names are unique and valid
    utils.ValidateDeviceNames("disk", self.op.disks)

    self._CheckDiskTemplateValid()

    # check disks' parameter names and consistent adopt/no-adopt strategy
    has_adopt = has_no_adopt = False
    for disk in self.op.disks:
      if self.op.disk_template != constants.DT_EXT:
        utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
      if constants.IDISK_ADOPT in disk:
        has_adopt = True
      else:
        has_no_adopt = True
    if has_adopt and has_no_adopt:
      raise errors.OpPrereqError("Either all disks are adopted or none is",
                                 errors.ECODE_INVAL)
    if has_adopt:
      if self.op.disk_template not in constants.DTS_MAY_ADOPT:
        raise errors.OpPrereqError("Disk adoption is not supported for the"
                                   " '%s' disk template" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)
      if self.op.iallocator is not None:
        raise errors.OpPrereqError("Disk adoption not allowed with an"
                                   " iallocator script", errors.ECODE_INVAL)
      if self.op.mode == constants.INSTANCE_IMPORT:
        raise errors.OpPrereqError("Disk adoption not allowed for"
                                   " instance import", errors.ECODE_INVAL)
    else:
      if self.op.disk_template in constants.DTS_MUST_ADOPT:
        raise errors.OpPrereqError("Disk template %s requires disk adoption,"
                                   " but no 'adopt' parameter given" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)

    self.adopt_disks = has_adopt

  def CheckArguments(self):
    """Check arguments.

    """
    # do not require name_check to ease forward/backward compatibility
    # for tools
    if self.op.no_install and self.op.start:
      self.LogInfo("No-installation mode selected, disabling startup")
      self.op.start = False
    # validate/normalize the instance name
    self.op.instance_name = \
      netutils.Hostname.GetNormalizedName(self.op.instance_name)

    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("Cannot do IP address check without a name"
                                 " check", errors.ECODE_INVAL)

    # check nics' parameter names
    for nic in self.op.nics:
      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
    # check that NICs' parameter names are unique and valid
    utils.ValidateDeviceNames("NIC", self.op.nics)

    self._CheckDiskArguments()

    # instance name verification
    if self.op.name_check:
      self.hostname = _CheckHostnameSane(self, self.op.instance_name)
      self.op.instance_name = self.hostname.name
      # used in CheckPrereq for ip ping check
      self.check_ip = self.hostname.ip
    else:
      self.check_ip = None

    # file storage checks
    if (self.op.file_driver and
        not self.op.file_driver in constants.FILE_DRIVER):
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                 self.op.file_driver, errors.ECODE_INVAL)

    ### Node/iallocator related checks
    CheckIAllocatorOrNode(self, "iallocator", "pnode")

    if self.op.pnode is not None:
      if self.op.disk_template in constants.DTS_INT_MIRROR:
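        # internally mirrored disk templates (e.g. DRBD) need a secondary
        # (mirror) node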
        if self.op.snode is None:
          raise errors.OpPrereqError("The networked disk templates need"
                                     " a mirror node", errors.ECODE_INVAL)
      elif self.op.snode:
        self.LogWarning("Secondary node will be ignored on non-mirrored disk"
                        " template")
        self.op.snode = None

    _CheckOpportunisticLocking(self.op)

    self._cds = GetClusterDomainSecret()

    if self.op.mode == constants.INSTANCE_IMPORT:
      # On import force_variant must be True, because if we forced it at
      # initial install, our only chance when importing it back is that it
      # works again!
      self.op.force_variant = True

      if self.op.no_install:
        self.LogInfo("No-installation mode has no effect during import")

    elif self.op.mode == constants.INSTANCE_CREATE:
      if self.op.os_type is None:
        raise errors.OpPrereqError("No guest OS specified",
                                   errors.ECODE_INVAL)
      if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
        raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
                                   " installation" % self.op.os_type,
                                   errors.ECODE_STATE)
      if self.op.disk_template is None:
        raise errors.OpPrereqError("No disk template specified",
                                   errors.ECODE_INVAL)

    elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
      # Check handshake to ensure both clusters have the same domain secret
      src_handshake = self.op.source_handshake
      if not src_handshake:
        raise errors.OpPrereqError("Missing source handshake",
                                   errors.ECODE_INVAL)

      errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
                                                           src_handshake)
      if errmsg:
        raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
                                   errors.ECODE_INVAL)

      # Load and check source CA
      self.source_x509_ca_pem = self.op.source_x509_ca
      if not self.source_x509_ca_pem:
        raise errors.OpPrereqError("Missing source X509 CA",
                                   errors.ECODE_INVAL)

      try:
        (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
                                                    self._cds)
      except OpenSSL.crypto.Error, err:
        raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
                                   (err, ), errors.ECODE_INVAL)

      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
      if errcode is not None:
        raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
                                   errors.ECODE_INVAL)

      self.source_x509_ca = cert

      src_instance_name = self.op.source_instance_name
      if not src_instance_name:
        raise errors.OpPrereqError("Missing source instance name",
                                   errors.ECODE_INVAL)

      self.source_instance_name = \
        netutils.GetHostname(name=src_instance_name).name

    else:
      raise errors.OpPrereqError("Invalid instance creation mode %r" %
                                 self.op.mode, errors.ECODE_INVAL)

  def ExpandNames(self):
    """ExpandNames for CreateInstance.

    Figure out the right locks for instance creation.

    """
    self.needed_locks = {}

    # this is just a preventive check, but someone might still add this
    # instance in the meantime, and creation will fail at lock-add time
    if self.op.instance_name in\
      [inst.name for inst in self.cfg.GetAllInstancesInfo().values()]:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 self.op.instance_name, errors.ECODE_EXISTS)

    self.add_locks[locking.LEVEL_INSTANCE] = self.op.instance_name

    if self.op.iallocator:
      # TODO: Find a solution to not lock all nodes in the cluster, e.g. by
      # specifying a group on instance creation and then selecting nodes from
      # that group
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
      self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET

      if self.op.opportunistic_locking:
        self.opportunistic_locks[locking.LEVEL_NODE] = True
        self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
    else:
      (self.op.pnode_uuid, self.op.pnode) = \
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
      nodelist = [self.op.pnode_uuid]
      if self.op.snode is not None:
        (self.op.snode_uuid, self.op.snode) = \
          ExpandNodeUuidAndName(self.cfg, self.op.snode_uuid, self.op.snode)
        nodelist.append(self.op.snode_uuid)
      self.needed_locks[locking.LEVEL_NODE] = nodelist

    # in case of import lock the source node too
    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = self.op.src_node
      src_path = self.op.src_path

      if src_path is None:
        self.op.src_path = src_path = self.op.instance_name

      if src_node is None:
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
        self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
        self.op.src_node = None
        if os.path.isabs(src_path):
          raise errors.OpPrereqError("Importing an instance from a path"
                                     " requires a source node option",
                                     errors.ECODE_INVAL)
      else:
        (self.op.src_node_uuid, self.op.src_node) = (_, src_node) = \
          ExpandNodeUuidAndName(self.cfg, self.op.src_node_uuid, src_node)
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
          self.needed_locks[locking.LEVEL_NODE].append(self.op.src_node_uuid)
        if not os.path.isabs(src_path):
          self.op.src_path = \
            utils.PathJoin(pathutils.EXPORT_DIR, src_path)

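    # the node resource locks start out as a copy of the node locks; they are
    # released separately later on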
    self.needed_locks[locking.LEVEL_NODE_RES] = \
      CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def _RunAllocator(self):
    """Run the allocator based on input opcode.

    """
    if self.op.opportunistic_locking:
      # Only consider nodes for which a lock is held
      node_name_whitelist = self.cfg.GetNodeNames(
        self.owned_locks(locking.LEVEL_NODE))
    else:
      node_name_whitelist = None

    #TODO Export network to iallocator so that it chooses a pnode
    #     in a nodegroup that has the desired network connected to
    req = _CreateInstanceAllocRequest(self.op, self.disks,
                                      self.nics, self.be_full,
                                      node_name_whitelist)
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      # When opportunistic locks are used only a temporary failure is generated
      if self.op.opportunistic_locking:
        ecode = errors.ECODE_TEMP_NORES
      else:
        ecode = errors.ECODE_NORES

      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" %
                                 (self.op.iallocator, ial.info),
                                 ecode)

    (self.op.pnode_uuid, self.op.pnode) = \
      ExpandNodeUuidAndName(self.cfg, None, ial.result[0])
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                 self.op.instance_name, self.op.iallocator,
                 utils.CommaJoin(ial.result))

    assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator"

    if req.RequiredNodes() == 2:
      (self.op.snode_uuid, self.op.snode) = \
        ExpandNodeUuidAndName(self.cfg, None, ial.result[1])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "ADD_MODE": self.op.mode,
      }
    if self.op.mode == constants.INSTANCE_IMPORT:
      env["SRC_NODE"] = self.op.src_node
      env["SRC_PATH"] = self.op.src_path
      env["SRC_IMAGES"] = self.src_images

    env.update(BuildInstanceHookEnv(
      name=self.op.instance_name,
      primary_node_name=self.op.pnode,
      secondary_node_names=self.cfg.GetNodeNames(self.secondaries),
      status=self.op.start,
      os_type=self.op.os_type,
      minmem=self.be_full[constants.BE_MINMEM],
      maxmem=self.be_full[constants.BE_MAXMEM],
      vcpus=self.be_full[constants.BE_VCPUS],
      nics=NICListToTuple(self, self.nics),
      disk_template=self.op.disk_template,
      disks=[(d[constants.IDISK_NAME], d.get("uuid", ""),
              d[constants.IDISK_SIZE], d[constants.IDISK_MODE])
             for d in self.disks],
      bep=self.be_full,
      hvp=self.hv_full,
      hypervisor_name=self.op.hypervisor,
      tags=self.op.tags,
      ))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode(), self.op.pnode_uuid] + self.secondaries
    return nl, nl

  def _ReadExportInfo(self):
    """Reads the export information from disk.

    It will override the opcode source node and path with the actual
    information, if these two were not specified before.

    @return: the export information

    """
    assert self.op.mode == constants.INSTANCE_IMPORT

    if self.op.src_node_uuid is None:
      locked_nodes = self.owned_locks(locking.LEVEL_NODE)
      exp_list = self.rpc.call_export_list(locked_nodes)
      found = False
      for node_uuid in exp_list:
        if exp_list[node_uuid].fail_msg:
          continue
        if self.op.src_path in exp_list[node_uuid].payload:
          found = True
          self.op.src_node = self.cfg.GetNodeInfo(node_uuid).name
          self.op.src_node_uuid = node_uuid
          self.op.src_path = utils.PathJoin(pathutils.EXPORT_DIR,
                                            self.op.src_path)
          break
      if not found:
        raise errors.OpPrereqError("No export found for relative path %s" %
                                   self.op.src_path, errors.ECODE_INVAL)

    CheckNodeOnline(self, self.op.src_node_uuid)
    result = self.rpc.call_export_info(self.op.src_node_uuid, self.op.src_path)
    result.Raise("No export or invalid export found in dir %s" %
                 self.op.src_path)

    export_info = objects.SerializableConfigParser.Loads(str(result.payload))
    if not export_info.has_section(constants.INISECT_EXP):
      raise errors.ProgrammerError("Corrupted export config",
                                   errors.ECODE_ENVIRON)

    ei_version = export_info.get(constants.INISECT_EXP, "version")
    if int(ei_version) != constants.EXPORT_VERSION:
      raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                 (ei_version, constants.EXPORT_VERSION),
                                 errors.ECODE_ENVIRON)
    return export_info

  def _ReadExportParams(self, einfo):
    """Use export parameters as defaults.

    In case the opcode doesn't specify (as in override) some instance
    parameters, try to use them from the export information, if
    that declares them.

    """
    self.op.os_type = einfo.get(constants.INISECT_EXP, "os")

    if not self.op.disks:
      disks = []
      # TODO: import the disk iv_name too
      for idx in range(constants.MAX_DISKS):
        if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
          disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
          disks.append({constants.IDISK_SIZE: disk_sz})
      self.op.disks = disks
      if not disks and self.op.disk_template != constants.DT_DISKLESS:
        raise errors.OpPrereqError("No disk info specified and the export"
                                   " is missing the disk information",
                                   errors.ECODE_INVAL)

    if not self.op.nics:
      nics = []
      for idx in range(constants.MAX_NICS):
        if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
          ndict = {}
          for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
            nic_param_name = "nic%d_%s" % (idx, name)
            if einfo.has_option(constants.INISECT_INS, nic_param_name):
              v = einfo.get(constants.INISECT_INS, nic_param_name)
              ndict[name] = v
          nics.append(ndict)
        else:
          break
      self.op.nics = nics

    if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
      self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()

    if (self.op.hypervisor is None and
        einfo.has_option(constants.INISECT_INS, "hypervisor")):
      self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")

    if einfo.has_section(constants.INISECT_HYP):
      # use the export parameters but do not override the ones
      # specified by the user
      for name, value in einfo.items(constants.INISECT_HYP):
        if name not in self.op.hvparams:
          self.op.hvparams[name] = value

    if einfo.has_section(constants.INISECT_BEP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_BEP):
        if name not in self.op.beparams:
          self.op.beparams[name] = value
        # Compatibility for the old "memory" be param
        if name == constants.BE_MEMORY:
          if constants.BE_MAXMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MAXMEM] = value
          if constants.BE_MINMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MINMEM] = value
    else:
      # try to read the parameters old style, from the main section
      for name in constants.BES_PARAMETERS:
        if (name not in self.op.beparams and
            einfo.has_option(constants.INISECT_INS, name)):
          self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)

    if einfo.has_section(constants.INISECT_OSP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_OSP):
        if name not in self.op.osparams:
          self.op.osparams[name] = value

  def _RevertToDefaults(self, cluster):
    """Revert the instance parameters to the default values.

    """
    # hvparams
    hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
    for name in self.op.hvparams.keys():
      if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
        del self.op.hvparams[name]
    # beparams
    be_defs = cluster.SimpleFillBE({})
    for name in self.op.beparams.keys():
      if name in be_defs and be_defs[name] == self.op.beparams[name]:
        del self.op.beparams[name]
    # nic params
    nic_defs = cluster.SimpleFillNIC({})
    for nic in self.op.nics:
      for name in constants.NICS_PARAMETERS:
        if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
          del nic[name]
    # osparams
    os_defs = cluster.SimpleFillOS(self.op.os_type, {})
    for name in self.op.osparams.keys():
      if name in os_defs and os_defs[name] == self.op.osparams[name]:
        del self.op.osparams[name]

  def _CalculateFileStorageDir(self):
    """Calculate final instance file storage dir.

    """
    # file storage dir calculation/check
    self.instance_file_storage_dir = None
    if self.op.disk_template in constants.DTS_FILEBASED:
      # build the full file storage dir path
      joinargs = []

      if self.op.disk_template == constants.DT_SHARED_FILE:
        get_fsd_fn = self.cfg.GetSharedFileStorageDir
      else:
        get_fsd_fn = self.cfg.GetFileStorageDir

      cfg_storagedir = get_fsd_fn()
      if not cfg_storagedir:
        raise errors.OpPrereqError("Cluster file storage dir not defined",
                                   errors.ECODE_STATE)
      joinargs.append(cfg_storagedir)

      if self.op.file_storage_dir is not None:
        joinargs.append(self.op.file_storage_dir)

      joinargs.append(self.op.instance_name)

      # pylint: disable=W0142
      self.instance_file_storage_dir = utils.PathJoin(*joinargs)

  def CheckPrereq(self): # pylint: disable=R0914
    """Check prerequisites.

    """
    self._CalculateFileStorageDir()

    if self.op.mode == constants.INSTANCE_IMPORT:
      export_info = self._ReadExportInfo()
      self._ReadExportParams(export_info)
      self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
    else:
      self._old_instance_name = None

    if (not self.cfg.GetVGName() and
        self.op.disk_template not in constants.DTS_NOT_LVM):
      raise errors.OpPrereqError("Cluster does not support lvm-based"
                                 " instances", errors.ECODE_STATE)

    if (self.op.hypervisor is None or
        self.op.hypervisor == constants.VALUE_AUTO):
      self.op.hypervisor = self.cfg.GetHypervisorType()

    cluster = self.cfg.GetClusterInfo()
    enabled_hvs = cluster.enabled_hypervisors
    if self.op.hypervisor not in enabled_hvs:
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
                                 " cluster (%s)" %
                                 (self.op.hypervisor, ",".join(enabled_hvs)),
                                 errors.ECODE_STATE)

    # Check tag validity
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)

    # check hypervisor parameter syntax (locally)
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
    filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
                                      self.op.hvparams)
    hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
    hv_type.CheckParameterSyntax(filled_hvp)
    self.hv_full = filled_hvp
    # check that we don't specify global parameters on an instance
    CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
                         "instance", "cluster")

    # fill and remember the beparams dict
    self.be_full = _ComputeFullBeParams(self.op, cluster)

    # build os parameters
    self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)

    # now that hvp/bep are in final format, let's reset to defaults,
    # if told to do so
    if self.op.identify_defaults:
      self._RevertToDefaults(cluster)

    # NIC buildup
    self.nics = _ComputeNics(self.op, cluster, self.check_ip, self.cfg,
                             self.proc.GetECId())

    # disk checks/pre-build
    default_vg = self.cfg.GetVGName()
    self.disks = ComputeDisks(self.op, default_vg)

    if self.op.mode == constants.INSTANCE_IMPORT:
      disk_images = []
      for idx in range(len(self.disks)):
        option = "disk%d_dump" % idx
        if export_info.has_option(constants.INISECT_INS, option):
          # FIXME: are the old os-es, disk sizes, etc. useful?
          export_name = export_info.get(constants.INISECT_INS, option)
          image = utils.PathJoin(self.op.src_path, export_name)
          disk_images.append(image)
        else:
          disk_images.append(False)

      self.src_images = disk_images

      if self.op.instance_name == self._old_instance_name:
        for idx, nic in enumerate(self.nics):
          if nic.mac == constants.VALUE_AUTO:
            nic_mac_ini = "nic%d_mac" % idx
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)

    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT

    # ip ping checks (we use the same ip that was resolved in ExpandNames)
    if self.op.ip_check:
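      # a successful TCP ping means something already answers on that IP, so
      # the address cannot be reused for the new instance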
      if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (self.check_ip, self.op.instance_name),
                                   errors.ECODE_NOTUNIQUE)

    #### mac address generation
    # By generating here the mac address both the allocator and the hooks get
    # the real final mac address rather than the 'auto' or 'generate' value.
    # There is a race condition between the generation and the instance object
    # creation, which means that we know the mac is valid now, but we're not
    # sure it will be when we actually add the instance. If things go bad
    # adding the instance will abort because of a duplicate mac, and the
    # creation job will fail.
    for nic in self.nics:
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId())

    #### allocator run

    if self.op.iallocator is not None:
      self._RunAllocator()

    # Release all unneeded node locks
    keep_locks = filter(None, [self.op.pnode_uuid, self.op.snode_uuid,
                               self.op.src_node_uuid])
    ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES)), \
      "Node locks differ from node resource locks"

    #### node related checks

    # check primary node
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode_uuid)
    assert self.pnode is not None, \
      "Cannot retrieve locked node %s" % self.op.pnode_uuid
    if pnode.offline:
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if pnode.drained:
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if not pnode.vm_capable:
      raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
                                 " '%s'" % pnode.name, errors.ECODE_STATE)

    self.secondaries = []

    # Fill in any IPs from IP pools. This must happen here, because we need to
    # know the nic's primary node, as specified by the iallocator
    for idx, nic in enumerate(self.nics):
      net_uuid = nic.network
      if net_uuid is not None:
        nobj = self.cfg.GetNetwork(net_uuid)
        netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.uuid)
        if netparams is None:
          raise errors.OpPrereqError("No netparams found for network"
                                     " %s. Probably not connected to"
                                     " node's %s nodegroup" %
                                     (nobj.name, self.pnode.name),
                                     errors.ECODE_INVAL)
        self.LogInfo("NIC/%d inherits netparams %s" %
                     (idx, netparams.values()))
        nic.nicparams = dict(netparams)
        if nic.ip is not None:
          if nic.ip.lower() == constants.NIC_IP_POOL:
            try:
              nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
                                         " from the address pool" % idx,
                                         errors.ECODE_STATE)
            self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name)
          else:
            try:
              self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("IP address %s already in use"
                                         " or does not belong to network %s" %
                                         (nic.ip, nobj.name),
                                         errors.ECODE_NOTUNIQUE)

      # net is None, ip None or given
      elif self.op.conflicts_check:
        _CheckForConflictingIp(self, nic.ip, self.pnode.uuid)

    # mirror node verification
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.snode_uuid == pnode.uuid:
        raise errors.OpPrereqError("The secondary node cannot be the"
                                   " primary node", errors.ECODE_INVAL)
      CheckNodeOnline(self, self.op.snode_uuid)
      CheckNodeNotDrained(self, self.op.snode_uuid)
      CheckNodeVmCapable(self, self.op.snode_uuid)
      self.secondaries.append(self.op.snode_uuid)

      snode = self.cfg.GetNodeInfo(self.op.snode_uuid)
      if pnode.group != snode.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    nodes = [pnode]
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      nodes.append(snode)
    has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
    excl_stor = compat.any(map(has_es, nodes))
    if excl_stor and not self.op.disk_template in constants.DTS_EXCL_STORAGE:
      raise errors.OpPrereqError("Disk template %s not supported with"
                                 " exclusive storage" % self.op.disk_template,
                                 errors.ECODE_STATE)
    for disk in self.disks:
      CheckSpindlesExclusiveStorage(disk, excl_stor, True)

    node_uuids = [pnode.uuid] + self.secondaries

    if not self.adopt_disks:
      if self.op.disk_template == constants.DT_RBD:
        # _CheckRADOSFreeSpace() is just a placeholder.
        # Any function that checks prerequisites can be placed here.
        # Check if there is enough space on the RADOS cluster.
        CheckRADOSFreeSpace()
      elif self.op.disk_template == constants.DT_EXT:
        # FIXME: Function that checks prereqs if needed
        pass
      elif self.op.disk_template in utils.GetLvmDiskTemplates():
        # Check lv size requirements, if not adopting
        req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
        CheckNodesFreeDiskPerVG(self, node_uuids, req_sizes)
      else:
        # FIXME: add checks for other, non-adopting, non-lvm disk templates
        pass

    elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
      all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
                                disk[constants.IDISK_ADOPT])
                     for disk in self.disks])
      if len(all_lvs) != len(self.disks):
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
                                   errors.ECODE_INVAL)
      for lv_name in all_lvs:
        try:
          # FIXME: lv_name here is "vg/lv" need to ensure that other calls
          # to ReserveLV uses the same syntax
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("LV named %s used by another instance" %
                                     lv_name, errors.ECODE_NOTUNIQUE)

      vg_names = self.rpc.call_vg_list([pnode.uuid])[pnode.uuid]
      vg_names.Raise("Cannot get VG information from node %s" % pnode.name)

      node_lvs = self.rpc.call_lv_list([pnode.uuid],
                                       vg_names.payload.keys())[pnode.uuid]
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
      node_lvs = node_lvs.payload
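      # node_lvs maps "vg/lv" names to tuples; field 0 is the volume size and
      # field 2 flags volumes that are currently online/in use (checked below)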

      delta = all_lvs.difference(node_lvs.keys())
      if delta:
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
      if online_lvs:
        raise errors.OpPrereqError("Online logical volumes found, cannot"
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
                                   errors.ECODE_STATE)
      # update the size of disk based on what is found
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
                                        dsk[constants.IDISK_ADOPT])][0]))

    elif self.op.disk_template == constants.DT_BLOCK:
      # Normalize and de-duplicate device paths
      all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
                       for disk in self.disks])
      if len(all_disks) != len(self.disks):
        raise errors.OpPrereqError("Duplicate disk names given for adoption",
                                   errors.ECODE_INVAL)
      baddisks = [d for d in all_disks
                  if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
      if baddisks:
        raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
                                   " cannot be adopted" %
                                   (utils.CommaJoin(baddisks),
                                    constants.ADOPTABLE_BLOCKDEV_ROOT),
                                   errors.ECODE_INVAL)

      node_disks = self.rpc.call_bdev_sizes([pnode.uuid],
                                            list(all_disks))[pnode.uuid]
      node_disks.Raise("Cannot get block device information from node %s" %
                       pnode.name)
      node_disks = node_disks.payload
      delta = all_disks.difference(node_disks.keys())
      if delta:
        raise errors.OpPrereqError("Missing block device(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_disks[dsk[constants.IDISK_ADOPT]]))

    # Verify instance specs
    spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
    ispec = {
      constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
      constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
      constants.ISPEC_DISK_COUNT: len(self.disks),
      constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE]
                                  for disk in self.disks],
      constants.ISPEC_NIC_COUNT: len(self.nics),
      constants.ISPEC_SPINDLE_USE: spindle_use,
      }

    group_info = self.cfg.GetNodeGroup(pnode.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
    res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
                                               self.op.disk_template)
    if not self.op.ignore_ipolicy and res:
      msg = ("Instance allocation to group %s (%s) violates policy: %s" %
             (pnode.group, group_info.name, utils.CommaJoin(res)))
      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

    CheckHVParams(self, node_uuids, self.op.hypervisor, self.op.hvparams)

    CheckNodeHasOS(self, pnode.uuid, self.op.os_type, self.op.force_variant)
    # check OS parameters (remotely)
    CheckOSParams(self, True, node_uuids, self.op.os_type, self.os_full)

    CheckNicsBridgesExist(self, self.nics, self.pnode.uuid)

    #TODO: _CheckExtParams (remotely)
    # Check parameters for extstorage

    # memory check on primary node
    #TODO(dynmem): use MINMEM for checking
    if self.op.start:
      hvfull = objects.FillDict(cluster.hvparams.get(self.op.hypervisor, {}),
                                self.op.hvparams)
      CheckNodeFreeMemory(self, self.pnode.uuid,
                          "creating instance %s" % self.op.instance_name,
                          self.be_full[constants.BE_MAXMEM],
                          self.op.hypervisor, hvfull)

    self.dry_run_result = list(node_uuids)

  def Exec(self, feedback_fn):
    """Create and add the instance to the cluster.

    """
    assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
                self.owned_locks(locking.LEVEL_NODE)), \
      "Node locks differ from node resource locks"
    assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)

    ht_kind = self.op.hypervisor
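    # hypervisors in HTS_REQ_PORT need a cluster-allocated network port,
    # typically for a remote console such as VNC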
1184
    if ht_kind in constants.HTS_REQ_PORT:
1185
      network_port = self.cfg.AllocatePort()
1186
    else:
1187
      network_port = None
1188

    
1189
    instance_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
1190

    
1191
    # This is ugly but we got a chicken-egg problem here
1192
    # We can only take the group disk parameters, as the instance
1193
    # has no disks yet (we are generating them right here).
1194
    nodegroup = self.cfg.GetNodeGroup(self.pnode.group)
1195
    disks = GenerateDiskTemplate(self,
1196
                                 self.op.disk_template,
1197
                                 instance_uuid, self.pnode.uuid,
1198
                                 self.secondaries,
1199
                                 self.disks,
1200
                                 self.instance_file_storage_dir,
1201
                                 self.op.file_driver,
1202
                                 0,
1203
                                 feedback_fn,
1204
                                 self.cfg.GetGroupDiskParams(nodegroup))
1205

    
1206
    iobj = objects.Instance(name=self.op.instance_name,
1207
                            uuid=instance_uuid,
1208
                            os=self.op.os_type,
1209
                            primary_node=self.pnode.uuid,
1210
                            nics=self.nics, disks=disks,
1211
                            disk_template=self.op.disk_template,
1212
                            disks_active=False,
1213
                            admin_state=constants.ADMINST_DOWN,
1214
                            network_port=network_port,
1215
                            beparams=self.op.beparams,
1216
                            hvparams=self.op.hvparams,
1217
                            hypervisor=self.op.hypervisor,
1218
                            osparams=self.op.osparams,
1219
                            )
1220

    
1221
    if self.op.tags:
1222
      for tag in self.op.tags:
1223
        iobj.AddTag(tag)
1224

    
1225
    if self.adopt_disks:
1226
      if self.op.disk_template == constants.DT_PLAIN:
1227
        # rename LVs to the newly-generated names; we need to construct
1228
        # 'fake' LV disks with the old data, plus the new unique_id
1229
        tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
1230
        rename_to = []
1231
        for t_dsk, a_dsk in zip(tmp_disks, self.disks):
1232
          rename_to.append(t_dsk.logical_id)
1233
          t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
1234
          self.cfg.SetDiskID(t_dsk, self.pnode.uuid)
1235
        result = self.rpc.call_blockdev_rename(self.pnode.uuid,
1236
                                               zip(tmp_disks, rename_to))
1237
        result.Raise("Failed to rename adoped LVs")
1238
    else:
1239
      feedback_fn("* creating instance disks...")
1240
      try:
1241
        CreateDisks(self, iobj)
1242
      except errors.OpExecError:
1243
        self.LogWarning("Device creation failed")
1244
        self.cfg.ReleaseDRBDMinors(self.op.instance_name)
1245
        raise
1246

    
    feedback_fn("adding instance %s to cluster config" % self.op.instance_name)

    self.cfg.AddInstance(iobj, self.proc.GetECId())

    # Declare that we don't want to remove the instance lock anymore, as we've
    # added the instance to the config
    del self.remove_locks[locking.LEVEL_INSTANCE]

    if self.op.mode == constants.INSTANCE_IMPORT:
      # Release unused nodes
      ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node_uuid])
    else:
      # Release all nodes
      ReleaseLocks(self, locking.LEVEL_NODE)

    disk_abort = False
    if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
      feedback_fn("* wiping instance disks...")
      try:
        WipeDisks(self, iobj)
      except errors.OpExecError, err:
        logging.exception("Wiping disks failed")
        self.LogWarning("Wiping instance disks failed (%s)", err)
        disk_abort = True

    if disk_abort:
      # Something is already wrong with the disks, don't do anything else
      pass
    elif self.op.wait_for_sync:
      disk_abort = not WaitForSync(self, iobj)
    elif iobj.disk_template in constants.DTS_INT_MIRROR:
      # make sure the disks are not degraded (still sync-ing is ok)
      feedback_fn("* checking mirrors status")
      disk_abort = not WaitForSync(self, iobj, oneshot=True)
    else:
      disk_abort = False

    if disk_abort:
      RemoveDisks(self, iobj)
      self.cfg.RemoveInstance(iobj.uuid)
      # Make sure the instance lock gets removed
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance")

    # instance disks are now active
    iobj.disks_active = True

    # Release all node resource locks
    ReleaseLocks(self, locking.LEVEL_NODE_RES)

    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
      # we need to set the disks ID to the primary node, since the
      # preceding code might or might not have done it, depending on
      # disk template and other options
      for disk in iobj.disks:
        self.cfg.SetDiskID(disk, self.pnode.uuid)
      if self.op.mode == constants.INSTANCE_CREATE:
        if not self.op.no_install:
          pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
                        not self.op.wait_for_sync)
          if pause_sync:
            feedback_fn("* pausing disk sync to install instance OS")
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
                                                              (iobj.disks,
                                                               iobj), True)
            for idx, success in enumerate(result.payload):
              if not success:
                logging.warn("pause-sync of instance %s for disk %d failed",
                             self.op.instance_name, idx)

          feedback_fn("* running the instance OS create scripts...")
          # FIXME: pass debug option from opcode to backend
          os_add_result = \
            self.rpc.call_instance_os_add(self.pnode.uuid, (iobj, None), False,
                                          self.op.debug_level)
          if pause_sync:
            feedback_fn("* resuming disk sync")
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
                                                              (iobj.disks,
                                                               iobj), False)
            for idx, success in enumerate(result.payload):
              if not success:
                logging.warn("resume-sync of instance %s for disk %d failed",
                             self.op.instance_name, idx)

          os_add_result.Raise("Could not add os for instance %s"
                              " on node %s" % (self.op.instance_name,
                                               self.pnode.name))

1337
      else:
1338
        if self.op.mode == constants.INSTANCE_IMPORT:
1339
          feedback_fn("* running the instance OS import scripts...")
1340

    
1341
          transfers = []
1342

    
1343
          for idx, image in enumerate(self.src_images):
1344
            if not image:
1345
              continue
1346

    
1347
            # FIXME: pass debug option from opcode to backend
1348
            dt = masterd.instance.DiskTransfer("disk/%s" % idx,
1349
                                               constants.IEIO_FILE, (image, ),
1350
                                               constants.IEIO_SCRIPT,
1351
                                               (iobj.disks[idx], idx),
1352
                                               None)
1353
            transfers.append(dt)
1354

    
1355
          import_result = \
1356
            masterd.instance.TransferInstanceData(self, feedback_fn,
1357
                                                  self.op.src_node_uuid,
1358
                                                  self.pnode.uuid,
1359
                                                  self.pnode.secondary_ip,
1360
                                                  iobj, transfers)
1361
          if not compat.all(import_result):
1362
            self.LogWarning("Some disks for instance %s on node %s were not"
1363
                            " imported successfully" % (self.op.instance_name,
1364
                                                        self.pnode.name))
1365

    
1366
          rename_from = self._old_instance_name
1367

    
1368
        elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
1369
          feedback_fn("* preparing remote import...")
1370
          # The source cluster will stop the instance before attempting to make
1371
          # a connection. In some cases stopping an instance can take a long
1372
          # time, hence the shutdown timeout is added to the connection
1373
          # timeout.
1374
          connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
1375
                             self.op.source_shutdown_timeout)
1376
          timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
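          # For example (figures illustrative only): with a 60 second
          # RIE_CONNECT_TIMEOUT and a 120 second source_shutdown_timeout the
          # remote end is allowed up to 180 seconds to connect.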
1377

    
1378
          assert iobj.primary_node == self.pnode.uuid
1379
          disk_results = \
1380
            masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
1381
                                          self.source_x509_ca,
1382
                                          self._cds, timeouts)
1383
          if not compat.all(disk_results):
1384
            # TODO: Should the instance still be started, even if some disks
1385
            # failed to import (valid for local imports, too)?
1386
            self.LogWarning("Some disks for instance %s on node %s were not"
1387
                            " imported successfully" % (self.op.instance_name,
1388
                                                        self.pnode.name))
1389

    
1390
          rename_from = self.source_instance_name
1391

    
1392
        else:
1393
          # also checked in the prereq part
1394
          raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
1395
                                       % self.op.mode)
1396

    
1397
        # Run rename script on newly imported instance
1398
        assert iobj.name == self.op.instance_name
1399
        feedback_fn("Running rename script for %s" % self.op.instance_name)
1400
        result = self.rpc.call_instance_run_rename(self.pnode.uuid, iobj,
1401
                                                   rename_from,
1402
                                                   self.op.debug_level)
1403
        result.Warn("Failed to run rename script for %s on node %s" %
1404
                    (self.op.instance_name, self.pnode.name), self.LogWarning)
1405

    
1406
    assert not self.owned_locks(locking.LEVEL_NODE_RES)
1407

    
1408
    if self.op.start:
1409
      iobj.admin_state = constants.ADMINST_UP
1410
      self.cfg.Update(iobj, feedback_fn)
1411
      logging.info("Starting instance %s on node %s", self.op.instance_name,
1412
                   self.pnode.name)
1413
      feedback_fn("* starting instance...")
1414
      result = self.rpc.call_instance_start(self.pnode.uuid, (iobj, None, None),
1415
                                            False, self.op.reason)
1416
      result.Raise("Could not start instance")
1417

    
1418
    return list(iobj.all_nodes)
1419

    
1420

    
1421
class LUInstanceRename(LogicalUnit):
1422
  """Rename an instance.
1423

1424
  """
1425
  HPATH = "instance-rename"
1426
  HTYPE = constants.HTYPE_INSTANCE
1427

    
1428
  def CheckArguments(self):
1429
    """Check arguments.
1430

1431
    """
1432
    if self.op.ip_check and not self.op.name_check:
1433
      # TODO: make the ip check more flexible and not depend on the name check
1434
      raise errors.OpPrereqError("IP address check requires a name check",
1435
                                 errors.ECODE_INVAL)
1436

    
1437
  def BuildHooksEnv(self):
1438
    """Build hooks env.
1439

1440
    This runs on master, primary and secondary nodes of the instance.
1441

1442
    """
1443
    env = BuildInstanceHookEnvByObject(self, self.instance)
1444
    env["INSTANCE_NEW_NAME"] = self.op.new_name
1445
    return env
1446

    
1447
  def BuildHooksNodes(self):
1448
    """Build hooks nodes.
1449

1450
    """
1451
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
1452
    return (nl, nl)
1453

    
1454
  def CheckPrereq(self):
1455
    """Check prerequisites.
1456

1457
    This checks that the instance is in the cluster and is not running.
1458

1459
    """
1460
    (self.op.instance_uuid, self.op.instance_name) = \
1461
      ExpandInstanceUuidAndName(self.cfg, self.op.instance_uuid,
1462
                                self.op.instance_name)
1463
    instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1464
    assert instance is not None
1465

    
1466
    # It should actually not happen that an instance is running with a disabled
1467
    # disk template, but in case it does, the renaming of file-based instances
1468
    # will fail horribly. Thus, we test it before.
1469
    if (instance.disk_template in constants.DTS_FILEBASED and
1470
        self.op.new_name != instance.name):
1471
      CheckDiskTemplateEnabled(self.cfg.GetClusterInfo(),
1472
                               instance.disk_template)
1473

    
1474
    CheckNodeOnline(self, instance.primary_node)
1475
    CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
1476
                       msg="cannot rename")
1477
    self.instance = instance
1478

    
1479
    new_name = self.op.new_name
1480
    if self.op.name_check:
1481
      hostname = _CheckHostnameSane(self, new_name)
1482
      new_name = self.op.new_name = hostname.name
1483
      if (self.op.ip_check and
1484
          netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
1485
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
1486
                                   (hostname.ip, new_name),
1487
                                   errors.ECODE_NOTUNIQUE)
1488

    
1489
    instance_names = [inst.name for
1490
                      inst in self.cfg.GetAllInstancesInfo().values()]
1491
    if new_name in instance_names and new_name != instance.name:
1492
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
1493
                                 new_name, errors.ECODE_EXISTS)
1494

    
1495
  def Exec(self, feedback_fn):
1496
    """Rename the instance.
1497

1498
    """
1499
    old_name = self.instance.name
1500

    
1501
    rename_file_storage = False
1502
    if (self.instance.disk_template in constants.DTS_FILEBASED and
1503
        self.op.new_name != self.instance.name):
1504
      old_file_storage_dir = os.path.dirname(
1505
                               self.instance.disks[0].logical_id[1])
1506
      rename_file_storage = True
1507

    
1508
    self.cfg.RenameInstance(self.instance.uuid, self.op.new_name)
1509
    # Change the instance lock. This is definitely safe while we hold the BGL.
1510
    # Otherwise the new lock would have to be added in acquired mode.
1511
    assert self.REQ_BGL
1512
    assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER)
1513
    self.glm.remove(locking.LEVEL_INSTANCE, old_name)
1514
    self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
1515

    
1516
    # re-read the instance from the configuration after rename
1517
    renamed_inst = self.cfg.GetInstanceInfo(self.instance.uuid)
1518

    
1519
    if rename_file_storage:
1520
      new_file_storage_dir = os.path.dirname(
1521
                               renamed_inst.disks[0].logical_id[1])
1522
      result = self.rpc.call_file_storage_dir_rename(renamed_inst.primary_node,
1523
                                                     old_file_storage_dir,
1524
                                                     new_file_storage_dir)
1525
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
1526
                   " (but the instance has been renamed in Ganeti)" %
1527
                   (self.cfg.GetNodeName(renamed_inst.primary_node),
1528
                    old_file_storage_dir, new_file_storage_dir))
1529

    
1530
    StartInstanceDisks(self, renamed_inst, None)
1531
    # update info on disks
1532
    info = GetInstanceInfoText(renamed_inst)
1533
    for (idx, disk) in enumerate(renamed_inst.disks):
1534
      for node_uuid in renamed_inst.all_nodes:
1535
        self.cfg.SetDiskID(disk, node_uuid)
1536
        result = self.rpc.call_blockdev_setinfo(node_uuid, disk, info)
1537
        result.Warn("Error setting info on node %s for disk %s" %
1538
                    (self.cfg.GetNodeName(node_uuid), idx), self.LogWarning)
1539
    try:
1540
      result = self.rpc.call_instance_run_rename(renamed_inst.primary_node,
1541
                                                 renamed_inst, old_name,
1542
                                                 self.op.debug_level)
1543
      result.Warn("Could not run OS rename script for instance %s on node %s"
1544
                  " (but the instance has been renamed in Ganeti)" %
1545
                  (renamed_inst.name,
1546
                   self.cfg.GetNodeName(renamed_inst.primary_node)),
1547
                  self.LogWarning)
1548
    finally:
1549
      ShutdownInstanceDisks(self, renamed_inst)
1550

    
1551
    return renamed_inst.name
1552

    
1553

    
1554
class LUInstanceRemove(LogicalUnit):
1555
  """Remove an instance.
1556

1557
  """
1558
  HPATH = "instance-remove"
1559
  HTYPE = constants.HTYPE_INSTANCE
1560
  REQ_BGL = False
1561

    
1562
  def ExpandNames(self):
1563
    self._ExpandAndLockInstance()
1564
    self.needed_locks[locking.LEVEL_NODE] = []
1565
    self.needed_locks[locking.LEVEL_NODE_RES] = []
1566
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
1567

    
1568
  def DeclareLocks(self, level):
1569
    if level == locking.LEVEL_NODE:
1570
      self._LockInstancesNodes()
1571
    elif level == locking.LEVEL_NODE_RES:
1572
      # Copy node locks
1573
      self.needed_locks[locking.LEVEL_NODE_RES] = \
1574
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1575

    
1576
  def BuildHooksEnv(self):
1577
    """Build hooks env.
1578

1579
    This runs on master, primary and secondary nodes of the instance.
1580

1581
    """
1582
    env = BuildInstanceHookEnvByObject(self, self.instance)
1583
    env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
1584
    return env
1585

    
1586
  def BuildHooksNodes(self):
1587
    """Build hooks nodes.
1588

1589
    """
1590
    nl = [self.cfg.GetMasterNode()]
1591
    nl_post = list(self.instance.all_nodes) + nl
1592
    return (nl, nl_post)
1593

    
1594
  def CheckPrereq(self):
1595
    """Check prerequisites.
1596

1597
    This checks that the instance is in the cluster.
1598

1599
    """
1600
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1601
    assert self.instance is not None, \
1602
      "Cannot retrieve locked instance %s" % self.op.instance_name
1603

    
1604
  def Exec(self, feedback_fn):
1605
    """Remove the instance.
1606

1607
    """
1608
    logging.info("Shutting down instance %s on node %s", self.instance.name,
1609
                 self.cfg.GetNodeName(self.instance.primary_node))
1610

    
1611
    result = self.rpc.call_instance_shutdown(self.instance.primary_node,
1612
                                             self.instance,
1613
                                             self.op.shutdown_timeout,
1614
                                             self.op.reason)
1615
    if self.op.ignore_failures:
1616
      result.Warn("Warning: can't shutdown instance", feedback_fn)
1617
    else:
1618
      result.Raise("Could not shutdown instance %s on node %s" %
1619
                   (self.instance.name,
1620
                    self.cfg.GetNodeName(self.instance.primary_node)))
1621

    
1622
    assert (self.owned_locks(locking.LEVEL_NODE) ==
1623
            self.owned_locks(locking.LEVEL_NODE_RES))
1624
    assert not (set(self.instance.all_nodes) -
1625
                self.owned_locks(locking.LEVEL_NODE)), \
1626
      "Not owning correct locks"
1627

    
1628
    RemoveInstance(self, feedback_fn, self.instance, self.op.ignore_failures)
1629

    
1630

    
1631
class LUInstanceMove(LogicalUnit):
1632
  """Move an instance by data-copying.
1633

1634
  """
1635
  HPATH = "instance-move"
1636
  HTYPE = constants.HTYPE_INSTANCE
1637
  REQ_BGL = False
1638

    
1639
  def ExpandNames(self):
1640
    self._ExpandAndLockInstance()
1641
    (self.op.target_node_uuid, self.op.target_node) = \
1642
      ExpandNodeUuidAndName(self.cfg, self.op.target_node_uuid,
1643
                            self.op.target_node)
1644
    self.needed_locks[locking.LEVEL_NODE] = [self.op.target_node_uuid]
1645
    self.needed_locks[locking.LEVEL_NODE_RES] = []
1646
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
1647

    
1648
  def DeclareLocks(self, level):
1649
    if level == locking.LEVEL_NODE:
1650
      self._LockInstancesNodes(primary_only=True)
1651
    elif level == locking.LEVEL_NODE_RES:
1652
      # Copy node locks
1653
      self.needed_locks[locking.LEVEL_NODE_RES] = \
1654
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1655

    
1656
  def BuildHooksEnv(self):
1657
    """Build hooks env.
1658

1659
    This runs on master, primary and secondary nodes of the instance.
1660

1661
    """
1662
    env = {
1663
      "TARGET_NODE": self.op.target_node,
1664
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
1665
      }
1666
    env.update(BuildInstanceHookEnvByObject(self, self.instance))
1667
    return env
1668

    
1669
  def BuildHooksNodes(self):
1670
    """Build hooks nodes.
1671

1672
    """
1673
    nl = [
1674
      self.cfg.GetMasterNode(),
1675
      self.instance.primary_node,
1676
      self.op.target_node_uuid,
1677
      ]
1678
    return (nl, nl)
1679

    
1680
  def CheckPrereq(self):
1681
    """Check prerequisites.
1682

1683
    This checks that the instance is in the cluster.
1684

1685
    """
1686
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1687
    assert self.instance is not None, \
1688
      "Cannot retrieve locked instance %s" % self.op.instance_name
1689

    
1690
    if self.instance.disk_template not in constants.DTS_COPYABLE:
1691
      raise errors.OpPrereqError("Disk template %s not suitable for copying" %
1692
                                 self.instance.disk_template,
1693
                                 errors.ECODE_STATE)
1694

    
1695
    target_node = self.cfg.GetNodeInfo(self.op.target_node_uuid)
1696
    assert target_node is not None, \
1697
      "Cannot retrieve locked node %s" % self.op.target_node
1698

    
1699
    self.target_node_uuid = target_node.uuid
1700
    if target_node.uuid == self.instance.primary_node:
1701
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
1702
                                 (self.instance.name, target_node.name),
1703
                                 errors.ECODE_STATE)
1704

    
1705
    bep = self.cfg.GetClusterInfo().FillBE(self.instance)
1706

    
1707
    for idx, dsk in enumerate(self.instance.disks):
1708
      if dsk.dev_type not in (constants.DT_PLAIN, constants.DT_FILE,
1709
                              constants.DT_SHARED_FILE):
1710
        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
1711
                                   " cannot copy" % idx, errors.ECODE_STATE)
1712

    
1713
    CheckNodeOnline(self, target_node.uuid)
1714
    CheckNodeNotDrained(self, target_node.uuid)
1715
    CheckNodeVmCapable(self, target_node.uuid)
1716
    cluster = self.cfg.GetClusterInfo()
1717
    group_info = self.cfg.GetNodeGroup(target_node.group)
1718
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
1719
    CheckTargetNodeIPolicy(self, ipolicy, self.instance, target_node, self.cfg,
1720
                           ignore=self.op.ignore_ipolicy)
1721

    
1722
    if self.instance.admin_state == constants.ADMINST_UP:
1723
      # check memory requirements on the secondary node
1724
      CheckNodeFreeMemory(
1725
          self, target_node.uuid, "failing over instance %s" %
1726
          self.instance.name, bep[constants.BE_MAXMEM],
1727
          self.instance.hypervisor,
1728
          self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])
1729
    else:
1730
      self.LogInfo("Not checking memory on the secondary node as"
1731
                   " instance will not be started")
1732

    
    # check bridge existence
    CheckInstanceBridgesExist(self, self.instance, node_uuid=target_node.uuid)
1735

    
1736
  def Exec(self, feedback_fn):
1737
    """Move an instance.
1738

1739
    The move is done by shutting it down on its present node, copying
1740
    the data over (slow) and starting it on the new node.
1741

1742
    """
1743
    source_node = self.cfg.GetNodeInfo(self.instance.primary_node)
1744
    target_node = self.cfg.GetNodeInfo(self.target_node_uuid)
1745

    
1746
    self.LogInfo("Shutting down instance %s on source node %s",
1747
                 self.instance.name, source_node.name)
1748

    
1749
    assert (self.owned_locks(locking.LEVEL_NODE) ==
1750
            self.owned_locks(locking.LEVEL_NODE_RES))
1751

    
1752
    result = self.rpc.call_instance_shutdown(source_node.uuid, self.instance,
1753
                                             self.op.shutdown_timeout,
1754
                                             self.op.reason)
1755
    if self.op.ignore_consistency:
1756
      result.Warn("Could not shutdown instance %s on node %s. Proceeding"
1757
                  " anyway. Please make sure node %s is down. Error details" %
1758
                  (self.instance.name, source_node.name, source_node.name),
1759
                  self.LogWarning)
1760
    else:
1761
      result.Raise("Could not shutdown instance %s on node %s" %
1762
                   (self.instance.name, source_node.name))
1763

    
1764
    # create the target disks
1765
    try:
1766
      CreateDisks(self, self.instance, target_node_uuid=target_node.uuid)
1767
    except errors.OpExecError:
1768
      self.LogWarning("Device creation failed")
1769
      self.cfg.ReleaseDRBDMinors(self.instance.uuid)
1770
      raise
1771

    
1772
    cluster_name = self.cfg.GetClusterInfo().cluster_name
1773

    
1774
    errs = []
1775
    # activate, get path, copy the data over
1776
    for idx, disk in enumerate(self.instance.disks):
1777
      self.LogInfo("Copying data for disk %d", idx)
1778
      result = self.rpc.call_blockdev_assemble(
1779
                 target_node.uuid, (disk, self.instance), self.instance.name,
1780
                 True, idx)
1781
      if result.fail_msg:
1782
        self.LogWarning("Can't assemble newly created disk %d: %s",
1783
                        idx, result.fail_msg)
1784
        errs.append(result.fail_msg)
1785
        break
1786
      dev_path = result.payload
1787
      result = self.rpc.call_blockdev_export(source_node.uuid, (disk,
1788
                                                                self.instance),
1789
                                             target_node.name, dev_path,
1790
                                             cluster_name)
1791
      if result.fail_msg:
1792
        self.LogWarning("Can't copy data over for disk %d: %s",
1793
                        idx, result.fail_msg)
1794
        errs.append(result.fail_msg)
1795
        break
1796

    
1797
    if errs:
1798
      self.LogWarning("Some disks failed to copy, aborting")
1799
      try:
1800
        RemoveDisks(self, self.instance, target_node_uuid=target_node.uuid)
1801
      finally:
1802
        self.cfg.ReleaseDRBDMinors(self.instance.uuid)
1803
        raise errors.OpExecError("Errors during disk copy: %s" %
1804
                                 (",".join(errs),))
1805

    
1806
    self.instance.primary_node = target_node.uuid
1807
    self.cfg.Update(self.instance, feedback_fn)
1808

    
1809
    self.LogInfo("Removing the disks on the original node")
1810
    RemoveDisks(self, self.instance, target_node_uuid=source_node.uuid)
1811

    
1812
    # Only start the instance if it's marked as up
1813
    if self.instance.admin_state == constants.ADMINST_UP:
1814
      self.LogInfo("Starting instance %s on node %s",
1815
                   self.instance.name, target_node.name)
1816

    
1817
      disks_ok, _ = AssembleInstanceDisks(self, self.instance,
1818
                                          ignore_secondaries=True)
1819
      if not disks_ok:
1820
        ShutdownInstanceDisks(self, self.instance)
1821
        raise errors.OpExecError("Can't activate the instance's disks")
1822

    
1823
      result = self.rpc.call_instance_start(target_node.uuid,
1824
                                            (self.instance, None, None), False,
1825
                                            self.op.reason)
1826
      msg = result.fail_msg
1827
      if msg:
1828
        ShutdownInstanceDisks(self, self.instance)
1829
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
1830
                                 (self.instance.name, target_node.name, msg))
1831

    
1832

    
1833
class LUInstanceMultiAlloc(NoHooksLU):
1834
  """Allocates multiple instances at the same time.
1835

1836
  """
1837
  REQ_BGL = False
1838

    
1839
  def CheckArguments(self):
1840
    """Check arguments.
1841

1842
    """
1843
    nodes = []
1844
    for inst in self.op.instances:
1845
      if inst.iallocator is not None:
1846
        raise errors.OpPrereqError("iallocator are not allowed to be set on"
1847
                                   " instance objects", errors.ECODE_INVAL)
1848
      nodes.append(bool(inst.pnode))
1849
      if inst.disk_template in constants.DTS_INT_MIRROR:
1850
        nodes.append(bool(inst.snode))
1851

    
1852
    has_nodes = compat.any(nodes)
1853
    if compat.all(nodes) ^ has_nodes:
1854
      raise errors.OpPrereqError("There are instance objects providing"
1855
                                 " pnode/snode while others do not",
1856
                                 errors.ECODE_INVAL)
1857

    
1858
    if not has_nodes and self.op.iallocator is None:
1859
      default_iallocator = self.cfg.GetDefaultIAllocator()
1860
      if default_iallocator:
1861
        self.op.iallocator = default_iallocator
1862
      else:
1863
        raise errors.OpPrereqError("No iallocator or nodes on the instances"
1864
                                   " given and no cluster-wide default"
1865
                                   " iallocator found; please specify either"
1866
                                   " an iallocator or nodes on the instances"
1867
                                   " or set a cluster-wide default iallocator",
1868
                                   errors.ECODE_INVAL)
1869

    
1870
    _CheckOpportunisticLocking(self.op)
1871

    
1872
    dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
1873
    if dups:
1874
      raise errors.OpPrereqError("There are duplicate instance names: %s" %
1875
                                 utils.CommaJoin(dups), errors.ECODE_INVAL)
1876

    
1877
  def ExpandNames(self):
1878
    """Calculate the locks.
1879

1880
    """
1881
    self.share_locks = ShareAll()
1882
    self.needed_locks = {
1883
      # iallocator will select nodes and even if no iallocator is used,
1884
      # collisions with LUInstanceCreate should be avoided
1885
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
1886
      }
1887

    
1888
    if self.op.iallocator:
1889
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
1890
      self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET
1891

    
1892
      if self.op.opportunistic_locking:
1893
        self.opportunistic_locks[locking.LEVEL_NODE] = True
1894
        self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
1895
    else:
1896
      nodeslist = []
1897
      for inst in self.op.instances:
1898
        (inst.pnode_uuid, inst.pnode) = \
1899
          ExpandNodeUuidAndName(self.cfg, inst.pnode_uuid, inst.pnode)
1900
        nodeslist.append(inst.pnode_uuid)
1901
        if inst.snode is not None:
1902
          (inst.snode_uuid, inst.snode) = \
1903
            ExpandNodeUuidAndName(self.cfg, inst.snode_uuid, inst.snode)
1904
          nodeslist.append(inst.snode_uuid)
1905

    
      self.needed_locks[locking.LEVEL_NODE] = nodeslist
      # Lock resources of instance's primary and secondary nodes (copy to
      # prevent accidental modification)
      self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)
1910

    
1911
  def CheckPrereq(self):
1912
    """Check prerequisite.
1913

1914
    """
1915
    if self.op.iallocator:
1916
      cluster = self.cfg.GetClusterInfo()
1917
      default_vg = self.cfg.GetVGName()
1918
      ec_id = self.proc.GetECId()
1919

    
1920
      if self.op.opportunistic_locking:
1921
        # Only consider nodes for which a lock is held
1922
        node_whitelist = self.cfg.GetNodeNames(
1923
                           list(self.owned_locks(locking.LEVEL_NODE)))
1924
      else:
1925
        node_whitelist = None
1926

    
1927
      insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
1928
                                           _ComputeNics(op, cluster, None,
1929
                                                        self.cfg, ec_id),
1930
                                           _ComputeFullBeParams(op, cluster),
1931
                                           node_whitelist)
1932
               for op in self.op.instances]
1933

    
1934
      req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
1935
      ial = iallocator.IAllocator(self.cfg, self.rpc, req)
1936

    
1937
      ial.Run(self.op.iallocator)
1938

    
1939
      if not ial.success:
1940
        raise errors.OpPrereqError("Can't compute nodes using"
1941
                                   " iallocator '%s': %s" %
1942
                                   (self.op.iallocator, ial.info),
1943
                                   errors.ECODE_NORES)
1944

    
1945
      self.ia_result = ial.result
1946

    
1947
    if self.op.dry_run:
1948
      self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
1949
        constants.JOB_IDS_KEY: [],
1950
        })
1951

    
1952
  def _ConstructPartialResult(self):
1953
    """Contructs the partial result.
1954

1955
    """
1956
    if self.op.iallocator:
1957
      (allocatable, failed_insts) = self.ia_result
1958
      allocatable_insts = map(compat.fst, allocatable)
1959
    else:
1960
      allocatable_insts = [op.instance_name for op in self.op.instances]
1961
      failed_insts = []
1962

    
1963
    return {
1964
      opcodes.OpInstanceMultiAlloc.ALLOCATABLE_KEY: allocatable_insts,
1965
      opcodes.OpInstanceMultiAlloc.FAILED_KEY: failed_insts,
1966
      }
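  # Illustrative shape of the partial result (instance names assumed):
  #   {opcodes.OpInstanceMultiAlloc.ALLOCATABLE_KEY: ["inst1", "inst2"],
  #    opcodes.OpInstanceMultiAlloc.FAILED_KEY: ["inst3"]}
  # Exec and the dry-run path merge this dictionary with the submitted job IDs.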
1967

    
1968
  def Exec(self, feedback_fn):
1969
    """Executes the opcode.
1970

1971
    """
1972
    jobs = []
1973
    if self.op.iallocator:
1974
      op2inst = dict((op.instance_name, op) for op in self.op.instances)
1975
      (allocatable, failed) = self.ia_result
1976

    
1977
      for (name, node_names) in allocatable:
        op = op2inst.pop(name)

        (op.pnode_uuid, op.pnode) = \
          ExpandNodeUuidAndName(self.cfg, None, node_names[0])
        if len(node_names) > 1:
          (op.snode_uuid, op.snode) = \
            ExpandNodeUuidAndName(self.cfg, None, node_names[1])

        jobs.append([op])

      missing = set(op2inst.keys()) - set(failed)
      assert not missing, \
        "Iallocator returned an incomplete result: %s" % \
        utils.CommaJoin(missing)
1992
    else:
1993
      jobs.extend([op] for op in self.op.instances)
1994

    
1995
    return ResultWithJobs(jobs, **self._ConstructPartialResult())
1996

    
1997

    
1998
class _InstNicModPrivate:
1999
  """Data structure for network interface modifications.
2000

2001
  Used by L{LUInstanceSetParams}.
2002

2003
  """
2004
  def __init__(self):
2005
    self.params = None
2006
    self.filled = None
2007

    
2008

    
2009
def _PrepareContainerMods(mods, private_fn):
2010
  """Prepares a list of container modifications by adding a private data field.
2011

2012
  @type mods: list of tuples; (operation, index, parameters)
2013
  @param mods: List of modifications
2014
  @type private_fn: callable or None
2015
  @param private_fn: Callable for constructing a private data field for a
2016
    modification
2017
  @rtype: list
2018

2019
  """
2020
  if private_fn is None:
2021
    fn = lambda: None
2022
  else:
2023
    fn = private_fn
2024

    
2025
  return [(op, idx, params, fn()) for (op, idx, params) in mods]
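# Illustrative example (values assumed): with
#   mods = [(constants.DDM_ADD, -1,
#            {constants.INIC_MAC: constants.VALUE_AUTO})]
# and private_fn=_InstNicModPrivate, each (op, index, params) 3-tuple gains a
# freshly constructed private object as its fourth element; with
# private_fn=None that element is simply None.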
2026

    
2027

    
2028
def _CheckNodesPhysicalCPUs(lu, node_uuids, requested, hypervisor_specs):
2029
  """Checks if nodes have enough physical CPUs
2030

2031
  This function checks if all given nodes have the needed number of
2032
  physical CPUs. In case any node has less CPUs or we cannot get the
2033
  information from the node, this function raises an OpPrereqError
2034
  exception.
2035

2036
  @type lu: C{LogicalUnit}
2037
  @param lu: a logical unit from which we get configuration data
2038
  @type node_uuids: C{list}
2039
  @param node_uuids: the list of node UUIDs to check
2040
  @type requested: C{int}
2041
  @param requested: the minimum acceptable number of physical CPUs
2042
  @type hypervisor_specs: list of pairs (string, dict of strings)
2043
  @param hypervisor_specs: list of hypervisor specifications in
2044
      pairs (hypervisor_name, hvparams)
2045
  @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
2046
      or we cannot check the node
2047

2048
  """
2049
  nodeinfo = lu.rpc.call_node_info(node_uuids, None, hypervisor_specs)
2050
  for node_uuid in node_uuids:
2051
    info = nodeinfo[node_uuid]
2052
    node_name = lu.cfg.GetNodeName(node_uuid)
2053
    info.Raise("Cannot get current information from node %s" % node_name,
2054
               prereq=True, ecode=errors.ECODE_ENVIRON)
2055
    (_, _, (hv_info, )) = info.payload
2056
    num_cpus = hv_info.get("cpu_total", None)
2057
    if not isinstance(num_cpus, int):
2058
      raise errors.OpPrereqError("Can't compute the number of physical CPUs"
2059
                                 " on node %s, result was '%s'" %
2060
                                 (node_name, num_cpus), errors.ECODE_ENVIRON)
2061
    if requested > num_cpus:
2062
      raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
2063
                                 "required" % (node_name, num_cpus, requested),
2064
                                 errors.ECODE_NORES)
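# A typical invocation (illustrative, variable names assumed) could be:
#   _CheckNodesPhysicalCPUs(self, [pnode.uuid], 4,
#                           [(instance.hypervisor,
#                             cluster.hvparams[instance.hypervisor])])
# i.e. hypervisor_specs pairs each hypervisor name with its hvparams dict.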
2065

    
2066

    
2067
def GetItemFromContainer(identifier, kind, container):
  """Return the item referred to by the identifier.
2069

2070
  @type identifier: string
2071
  @param identifier: Item index or name or UUID
2072
  @type kind: string
2073
  @param kind: One-word item description
2074
  @type container: list
2075
  @param container: Container to get the item from
2076

2077
  """
2078
  # Index
2079
  try:
2080
    idx = int(identifier)
2081
    if idx == -1:
2082
      # Append
2083
      absidx = len(container) - 1
2084
    elif idx < 0:
2085
      raise IndexError("Not accepting negative indices other than -1")
2086
    elif idx > len(container):
2087
      raise IndexError("Got %s index %s, but there are only %s" %
2088
                       (kind, idx, len(container)))
2089
    else:
2090
      absidx = idx
2091
    return (absidx, container[idx])
2092
  except ValueError:
2093
    pass
2094

    
2095
  for idx, item in enumerate(container):
2096
    if item.uuid == identifier or item.name == identifier:
2097
      return (idx, item)
2098

    
2099
  raise errors.OpPrereqError("Cannot find %s with identifier %s" %
2100
                             (kind, identifier), errors.ECODE_NOENT)
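# Illustrative behaviour (container contents assumed): for a list of three NIC
# objects, GetItemFromContainer("-1", "nic", nics) returns (2, nics[2]), i.e.
# "-1" addresses the last item, while a non-numeric identifier such as "eth0"
# is matched against each item's uuid and name attributes instead.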
2101

    
2102

    
2103
def _ApplyContainerMods(kind, container, chgdesc, mods,
2104
                        create_fn, modify_fn, remove_fn):
2105
  """Applies descriptions in C{mods} to C{container}.
2106

2107
  @type kind: string
2108
  @param kind: One-word item description
2109
  @type container: list
2110
  @param container: Container to modify
2111
  @type chgdesc: None or list
2112
  @param chgdesc: List of applied changes
2113
  @type mods: list
2114
  @param mods: Modifications as returned by L{_PrepareContainerMods}
2115
  @type create_fn: callable
2116
  @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
2117
    receives absolute item index, parameters and private data object as added
2118
    by L{_PrepareContainerMods}, returns tuple containing new item and changes
2119
    as list
2120
  @type modify_fn: callable
2121
  @param modify_fn: Callback for modifying an existing item
2122
    (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
2123
    and private data object as added by L{_PrepareContainerMods}, returns
2124
    changes as list
2125
  @type remove_fn: callable
2126
  @param remove_fn: Callback on removing item; receives absolute item index,
2127
    item and private data object as added by L{_PrepareContainerMods}
2128

2129
  """
2130
  for (op, identifier, params, private) in mods:
2131
    changes = None
2132

    
2133
    if op == constants.DDM_ADD:
2134
      # Calculate where item will be added
2135
      # When adding an item, identifier can only be an index
2136
      try:
2137
        idx = int(identifier)
2138
      except ValueError:
        raise errors.OpPrereqError("Only positive integer or -1 is accepted as"
2140
                                   " identifier for %s" % constants.DDM_ADD,
2141
                                   errors.ECODE_INVAL)
2142
      if idx == -1:
2143
        addidx = len(container)
2144
      else:
2145
        if idx < 0:
2146
          raise IndexError("Not accepting negative indices other than -1")
2147
        elif idx > len(container):
2148
          raise IndexError("Got %s index %s, but there are only %s" %
2149
                           (kind, idx, len(container)))
2150
        addidx = idx
2151

    
2152
      if create_fn is None:
2153
        item = params
2154
      else:
2155
        (item, changes) = create_fn(addidx, params, private)
2156

    
2157
      if idx == -1:
2158
        container.append(item)
2159
      else:
2160
        assert idx >= 0
2161
        assert idx <= len(container)
2162
        # list.insert does so before the specified index
2163
        container.insert(idx, item)
2164
    else:
2165
      # Retrieve existing item
2166
      (absidx, item) = GetItemFromContainer(identifier, kind, container)
2167

    
2168
      if op == constants.DDM_REMOVE:
2169
        assert not params
2170

    
2171
        if remove_fn is not None:
2172
          remove_fn(absidx, item, private)
2173

    
2174
        changes = [("%s/%s" % (kind, absidx), "remove")]
2175

    
2176
        assert container[absidx] == item
2177
        del container[absidx]
2178
      elif op == constants.DDM_MODIFY:
2179
        if modify_fn is not None:
2180
          changes = modify_fn(absidx, item, params, private)
2181
      else:
2182
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2183

    
2184
    assert _TApplyContModsCbChanges(changes)
2185

    
2186
    if not (chgdesc is None or changes is None):
2187
      chgdesc.extend(changes)
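# Minimal illustrative usage (assumed values, no callbacks): applying a single
# removal to a plain list,
#   chgdesc = []
#   _ApplyContainerMods("test", [1, 2, 3], chgdesc,
#                       _PrepareContainerMods([(constants.DDM_REMOVE, 2, None)],
#                                             None),
#                       None, None, None)
# leaves the container as [1, 2] and records ("test/2", "remove") in chgdesc.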
2188

    
2189

    
2190
def _UpdateIvNames(base_index, disks):
2191
  """Updates the C{iv_name} attribute of disks.
2192

2193
  @type disks: list of L{objects.Disk}
2194

2195
  """
2196
  for (idx, disk) in enumerate(disks):
2197
    disk.iv_name = "disk/%s" % (base_index + idx, )
2198

    
2199

    
2200
class LUInstanceSetParams(LogicalUnit):
  """Modifies an instance's parameters.
2202

2203
  """
2204
  HPATH = "instance-modify"
2205
  HTYPE = constants.HTYPE_INSTANCE
2206
  REQ_BGL = False
2207

    
2208
  @staticmethod
2209
  def _UpgradeDiskNicMods(kind, mods, verify_fn):
2210
    assert ht.TList(mods)
2211
    assert not mods or len(mods[0]) in (2, 3)
2212

    
2213
    if mods and len(mods[0]) == 2:
2214
      result = []
2215

    
2216
      addremove = 0
2217
      for op, params in mods:
2218
        if op in (constants.DDM_ADD, constants.DDM_REMOVE):
2219
          result.append((op, -1, params))
2220
          addremove += 1
2221

    
2222
          if addremove > 1:
2223
            raise errors.OpPrereqError("Only one %s add or remove operation is"
2224
                                       " supported at a time" % kind,
2225
                                       errors.ECODE_INVAL)
2226
        else:
2227
          result.append((constants.DDM_MODIFY, op, params))
2228

    
2229
      assert verify_fn(result)
2230
    else:
2231
      result = mods
2232

    
2233
    return result
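  # Illustrative sketch (parameter values assumed): the legacy 2-tuple format
  #   [("add", {"size": 1024}), (0, {"mode": "ro"})]
  # would be upgraded to the 3-tuple format
  #   [(constants.DDM_ADD, -1, {"size": 1024}),
  #    (constants.DDM_MODIFY, 0, {"mode": "ro"})]
  # before being verified by the usual TestDiskModifications check.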
2234

    
2235
  @staticmethod
2236
  def _CheckMods(kind, mods, key_types, item_fn):
2237
    """Ensures requested disk/NIC modifications are valid.
2238

2239
    """
2240
    for (op, _, params) in mods:
2241
      assert ht.TDict(params)
2242

    
2243
      # If 'key_types' is an empty dict, we assume we have an
2244
      # 'ext' template and thus do not ForceDictType
2245
      if key_types:
2246
        utils.ForceDictType(params, key_types)
2247

    
2248
      if op == constants.DDM_REMOVE:
2249
        if params:
2250
          raise errors.OpPrereqError("No settings should be passed when"
2251
                                     " removing a %s" % kind,
2252
                                     errors.ECODE_INVAL)
2253
      elif op in (constants.DDM_ADD, constants.DDM_MODIFY):
2254
        item_fn(op, params)
2255
      else:
2256
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2257

    
2258
  @staticmethod
2259
  def _VerifyDiskModification(op, params, excl_stor):
2260
    """Verifies a disk modification.
2261

2262
    """
2263
    if op == constants.DDM_ADD:
2264
      mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
2265
      if mode not in constants.DISK_ACCESS_SET:
2266
        raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
2267
                                   errors.ECODE_INVAL)
2268

    
2269
      size = params.get(constants.IDISK_SIZE, None)
2270
      if size is None:
2271
        raise errors.OpPrereqError("Required disk parameter '%s' missing" %
2272
                                   constants.IDISK_SIZE, errors.ECODE_INVAL)
2273

    
2274
      try:
2275
        size = int(size)
2276
      except (TypeError, ValueError), err:
2277
        raise errors.OpPrereqError("Invalid disk size parameter: %s" % err,
2278
                                   errors.ECODE_INVAL)
2279

    
2280
      params[constants.IDISK_SIZE] = size
2281
      name = params.get(constants.IDISK_NAME, None)
2282
      if name is not None and name.lower() == constants.VALUE_NONE:
2283
        params[constants.IDISK_NAME] = None
2284

    
2285
      CheckSpindlesExclusiveStorage(params, excl_stor, True)
2286

    
2287
    elif op == constants.DDM_MODIFY:
2288
      if constants.IDISK_SIZE in params:
2289
        raise errors.OpPrereqError("Disk size change not possible, use"
2290
                                   " grow-disk", errors.ECODE_INVAL)
2291
      if len(params) > 2:
2292
        raise errors.OpPrereqError("Disk modification doesn't support"
2293
                                   " additional arbitrary parameters",
2294
                                   errors.ECODE_INVAL)
2295
      name = params.get(constants.IDISK_NAME, None)
2296
      if name is not None and name.lower() == constants.VALUE_NONE:
2297
        params[constants.IDISK_NAME] = None
2298

    
2299
  @staticmethod
2300
  def _VerifyNicModification(op, params):
2301
    """Verifies a network interface modification.
2302

2303
    """
2304
    if op in (constants.DDM_ADD, constants.DDM_MODIFY):
2305
      ip = params.get(constants.INIC_IP, None)
2306
      name = params.get(constants.INIC_NAME, None)
2307
      req_net = params.get(constants.INIC_NETWORK, None)
2308
      link = params.get(constants.NIC_LINK, None)
2309
      mode = params.get(constants.NIC_MODE, None)
2310
      if name is not None and name.lower() == constants.VALUE_NONE:
2311
        params[constants.INIC_NAME] = None
2312
      if req_net is not None:
2313
        if req_net.lower() == constants.VALUE_NONE:
2314
          params[constants.INIC_NETWORK] = None
2315
          req_net = None
2316
        elif link is not None or mode is not None:
          raise errors.OpPrereqError("If a network is given,"
                                     " mode or link should not be set",
2319
                                     errors.ECODE_INVAL)
2320

    
2321
      if op == constants.DDM_ADD:
2322
        macaddr = params.get(constants.INIC_MAC, None)
2323
        if macaddr is None:
2324
          params[constants.INIC_MAC] = constants.VALUE_AUTO
2325

    
2326
      if ip is not None:
2327
        if ip.lower() == constants.VALUE_NONE:
2328
          params[constants.INIC_IP] = None
2329
        else:
2330
          if ip.lower() == constants.NIC_IP_POOL:
2331
            if op == constants.DDM_ADD and req_net is None:
2332
              raise errors.OpPrereqError("If ip=pool, parameter network"
2333
                                         " cannot be none",
2334
                                         errors.ECODE_INVAL)
2335
          else:
2336
            if not netutils.IPAddress.IsValid(ip):
2337
              raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
2338
                                         errors.ECODE_INVAL)
2339

    
2340
      if constants.INIC_MAC in params:
2341
        macaddr = params[constants.INIC_MAC]
2342
        if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2343
          macaddr = utils.NormalizeAndValidateMac(macaddr)
2344

    
2345
        if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
2346
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
2347
                                     " modifying an existing NIC",
2348
                                     errors.ECODE_INVAL)
2349

    
2350
  def CheckArguments(self):
2351
    if not (self.op.nics or self.op.disks or self.op.disk_template or
2352
            self.op.hvparams or self.op.beparams or self.op.os_name or
2353
            self.op.osparams or self.op.offline is not None or
2354
            self.op.runtime_mem or self.op.pnode):
2355
      raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
2356

    
2357
    if self.op.hvparams:
2358
      CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
2359
                           "hypervisor", "instance", "cluster")
2360

    
2361
    self.op.disks = self._UpgradeDiskNicMods(
2362
      "disk", self.op.disks, opcodes.OpInstanceSetParams.TestDiskModifications)
2363
    self.op.nics = self._UpgradeDiskNicMods(
2364
      "NIC", self.op.nics, opcodes.OpInstanceSetParams.TestNicModifications)
2365

    
2366
    if self.op.disks and self.op.disk_template is not None:
2367
      raise errors.OpPrereqError("Disk template conversion and other disk"
2368
                                 " changes not supported at the same time",
2369
                                 errors.ECODE_INVAL)
2370

    
2371
    if (self.op.disk_template and
2372
        self.op.disk_template in constants.DTS_INT_MIRROR and
2373
        self.op.remote_node is None):
2374
      raise errors.OpPrereqError("Changing the disk template to a mirrored"
2375
                                 " one requires specifying a secondary node",
2376
                                 errors.ECODE_INVAL)
2377

    
2378
    # Check NIC modifications
2379
    self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
2380
                    self._VerifyNicModification)
2381

    
2382
    if self.op.pnode:
2383
      (self.op.pnode_uuid, self.op.pnode) = \
2384
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
2385

    
2386
  def ExpandNames(self):
2387
    self._ExpandAndLockInstance()
2388
    self.needed_locks[locking.LEVEL_NODEGROUP] = []
2389
    # Can't even acquire node locks in shared mode as upcoming changes in
2390
    # Ganeti 2.6 will start to modify the node object on disk conversion
2391
    self.needed_locks[locking.LEVEL_NODE] = []
2392
    self.needed_locks[locking.LEVEL_NODE_RES] = []
2393
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
    # Lock node group to look up the ipolicy
2395
    self.share_locks[locking.LEVEL_NODEGROUP] = 1
2396

    
2397
  def DeclareLocks(self, level):
2398
    if level == locking.LEVEL_NODEGROUP:
2399
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
2400
      # Acquire locks for the instance's nodegroups optimistically. Needs
2401
      # to be verified in CheckPrereq
2402
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
2403
        self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
2404
    elif level == locking.LEVEL_NODE:
2405
      self._LockInstancesNodes()
2406
      if self.op.disk_template and self.op.remote_node:
2407
        (self.op.remote_node_uuid, self.op.remote_node) = \
2408
          ExpandNodeUuidAndName(self.cfg, self.op.remote_node_uuid,
2409
                                self.op.remote_node)
2410
        self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node_uuid)
2411
    elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
2412
      # Copy node locks
2413
      self.needed_locks[locking.LEVEL_NODE_RES] = \
2414
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
2415

    
2416
  def BuildHooksEnv(self):
2417
    """Build hooks env.
2418

2419
    This runs on the master, primary and secondaries.
2420

2421
    """
2422
    args = {}
2423
    if constants.BE_MINMEM in self.be_new:
2424
      args["minmem"] = self.be_new[constants.BE_MINMEM]
2425
    if constants.BE_MAXMEM in self.be_new:
2426
      args["maxmem"] = self.be_new[constants.BE_MAXMEM]
2427
    if constants.BE_VCPUS in self.be_new:
2428
      args["vcpus"] = self.be_new[constants.BE_VCPUS]
2429
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
2430
    # information at all.
2431

    
2432
    if self._new_nics is not None:
2433
      nics = []
2434

    
2435
      for nic in self._new_nics:
2436
        n = copy.deepcopy(nic)
2437
        nicparams = self.cluster.SimpleFillNIC(n.nicparams)
2438
        n.nicparams = nicparams
2439
        nics.append(NICToTuple(self, n))
2440

    
2441
      args["nics"] = nics
2442

    
2443
    env = BuildInstanceHookEnvByObject(self, self.instance, override=args)
2444
    if self.op.disk_template:
2445
      env["NEW_DISK_TEMPLATE"] = self.op.disk_template
2446
    if self.op.runtime_mem:
2447
      env["RUNTIME_MEMORY"] = self.op.runtime_mem
2448

    
2449
    return env
2450

    
2451
  def BuildHooksNodes(self):
2452
    """Build hooks nodes.
2453

2454
    """
2455
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
2456
    return (nl, nl)
2457

    
2458
  def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
2459
                              old_params, cluster, pnode_uuid):
2460

    
2461
    update_params_dict = dict([(key, params[key])
                               for key in constants.NICS_PARAMETERS
                               if key in params])

    req_link = update_params_dict.get(constants.NIC_LINK, None)
    req_mode = update_params_dict.get(constants.NIC_MODE, None)

    new_net_uuid = None
    new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid)
    if new_net_uuid_or_name:
      new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name)
      new_net_obj = self.cfg.GetNetwork(new_net_uuid)

    if old_net_uuid:
      old_net_obj = self.cfg.GetNetwork(old_net_uuid)

    if new_net_uuid:
      netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode_uuid)
      if not netparams:
        raise errors.OpPrereqError("No netparams found for the network"
                                   " %s, probably not connected" %
                                   new_net_obj.name, errors.ECODE_INVAL)
      new_params = dict(netparams)
    else:
      new_params = GetUpdatedParams(old_params, update_params_dict)

    utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)

    new_filled_params = cluster.SimpleFillNIC(new_params)
    objects.NIC.CheckParameterSyntax(new_filled_params)

    new_mode = new_filled_params[constants.NIC_MODE]
    if new_mode == constants.NIC_MODE_BRIDGED:
      bridge = new_filled_params[constants.NIC_LINK]
      msg = self.rpc.call_bridges_exist(pnode_uuid, [bridge]).fail_msg
      if msg:
        msg = "Error checking bridges on node '%s': %s" % \
                (self.cfg.GetNodeName(pnode_uuid), msg)
        if self.op.force:
          self.warn.append(msg)
        else:
          raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)

    elif new_mode == constants.NIC_MODE_ROUTED:
      ip = params.get(constants.INIC_IP, old_ip)
      if ip is None:
        raise errors.OpPrereqError("Cannot set the NIC IP address to None"
                                   " on a routed NIC", errors.ECODE_INVAL)

    elif new_mode == constants.NIC_MODE_OVS:
      # TODO: check OVS link
      self.LogInfo("OVS links are currently not checked for correctness")

    if constants.INIC_MAC in params:
      mac = params[constants.INIC_MAC]
      if mac is None:
        raise errors.OpPrereqError("Cannot unset the NIC MAC address",
                                   errors.ECODE_INVAL)
      elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        # otherwise generate the MAC address
        params[constants.INIC_MAC] = \
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
      else:
        # or validate/reserve the current one
        try:
          self.cfg.ReserveMAC(mac, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("MAC address '%s' already in use"
                                     " in cluster" % mac,
                                     errors.ECODE_NOTUNIQUE)
    elif new_net_uuid != old_net_uuid:

      def get_net_prefix(net_uuid):
        mac_prefix = None
        if net_uuid:
          nobj = self.cfg.GetNetwork(net_uuid)
          mac_prefix = nobj.mac_prefix

        return mac_prefix

      new_prefix = get_net_prefix(new_net_uuid)
      old_prefix = get_net_prefix(old_net_uuid)
      if old_prefix != new_prefix:
        params[constants.INIC_MAC] = \
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())

    # if there is a change in (ip, network) tuple
    new_ip = params.get(constants.INIC_IP, old_ip)
    if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
      if new_ip:
        # if IP is pool then require a network and generate one IP
        if new_ip.lower() == constants.NIC_IP_POOL:
          if new_net_uuid:
            try:
              new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("Unable to get a free IP"
                                         " from the address pool",
                                         errors.ECODE_STATE)
            self.LogInfo("Chose IP %s from network %s",
                         new_ip,
                         new_net_obj.name)
            params[constants.INIC_IP] = new_ip
          else:
            raise errors.OpPrereqError("ip=pool, but no network found",
                                       errors.ECODE_INVAL)
        # Reserve new IP if in the new network if any
        elif new_net_uuid:
          try:
            self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId())
            self.LogInfo("Reserving IP %s in network %s",
                         new_ip, new_net_obj.name)
          except errors.ReservationError:
            raise errors.OpPrereqError("IP %s not available in network %s" %
                                       (new_ip, new_net_obj.name),
                                       errors.ECODE_NOTUNIQUE)
        # new network is None so check if new IP is a conflicting IP
        elif self.op.conflicts_check:
          _CheckForConflictingIp(self, new_ip, pnode_uuid)

      # release old IP if old network is not None
      if old_ip and old_net_uuid:
        try:
          self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId())
        except errors.AddressPoolError:
          logging.warning("Release IP %s not contained in network %s",
                          old_ip, old_net_obj.name)

    # there are no changes in (ip, network) tuple and old network is not None
    elif (old_net_uuid is not None and
          (req_link is not None or req_mode is not None)):
      raise errors.OpPrereqError("Not allowed to change link or mode of"
                                 " a NIC that is connected to a network",
                                 errors.ECODE_INVAL)

    private.params = new_params
    private.filled = new_filled_params

  def _PreCheckDiskTemplate(self, pnode_info):
    """CheckPrereq checks related to a new disk template."""
    # Arguments are passed to avoid configuration lookups
    pnode_uuid = self.instance.primary_node
    if self.instance.disk_template == self.op.disk_template:
      raise errors.OpPrereqError("Instance already has disk template %s" %
                                 self.instance.disk_template,
                                 errors.ECODE_INVAL)

    if not self.cluster.IsDiskTemplateEnabled(self.op.disk_template):
      raise errors.OpPrereqError("Disk template '%s' is not enabled for this"
                                 " cluster." % self.op.disk_template)

    if (self.instance.disk_template,
        self.op.disk_template) not in self._DISK_CONVERSIONS:
      raise errors.OpPrereqError("Unsupported disk template conversion from"
                                 " %s to %s" % (self.instance.disk_template,
                                                self.op.disk_template),
                                 errors.ECODE_INVAL)
    CheckInstanceState(self, self.instance, INSTANCE_DOWN,
                       msg="cannot change disk template")
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.remote_node_uuid == pnode_uuid:
        raise errors.OpPrereqError("Given new secondary node %s is the same"
                                   " as the primary node of the instance" %
                                   self.op.remote_node, errors.ECODE_STATE)
      CheckNodeOnline(self, self.op.remote_node_uuid)
      CheckNodeNotDrained(self, self.op.remote_node_uuid)
      # FIXME: here we assume that the old instance type is DT_PLAIN
      assert self.instance.disk_template == constants.DT_PLAIN
      disks = [{constants.IDISK_SIZE: d.size,
                constants.IDISK_VG: d.logical_id[0]}
               for d in self.instance.disks]
      required = ComputeDiskSizePerVG(self.op.disk_template, disks)
      CheckNodesFreeDiskPerVG(self, [self.op.remote_node_uuid], required)

      snode_info = self.cfg.GetNodeInfo(self.op.remote_node_uuid)
      snode_group = self.cfg.GetNodeGroup(snode_info.group)
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
                                                              snode_group)
      CheckTargetNodeIPolicy(self, ipolicy, self.instance, snode_info, self.cfg,
                             ignore=self.op.ignore_ipolicy)
      if pnode_info.group != snode_info.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    if not self.op.disk_template in constants.DTS_EXCL_STORAGE:
      # Make sure none of the nodes require exclusive storage
      nodes = [pnode_info]
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        assert snode_info
        nodes.append(snode_info)
      has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
      if compat.any(map(has_es, nodes)):
        errmsg = ("Cannot convert disk template from %s to %s when exclusive"
                  " storage is enabled" % (self.instance.disk_template,
                                           self.op.disk_template))
        raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)

  def _PreCheckDisks(self, ispec):
    """CheckPrereq checks related to disk changes.

    @type ispec: dict
    @param ispec: instance specs to be updated with the new disks

    """
    self.diskparams = self.cfg.GetInstanceDiskParams(self.instance)

    excl_stor = compat.any(
      rpc.GetExclusiveStorageForNodes(self.cfg,
                                      self.instance.all_nodes).values()
      )

    # Check disk modifications. This is done here and not in CheckArguments
    # (as with NICs), because we need to know the instance's disk template
    ver_fn = lambda op, par: self._VerifyDiskModification(op, par, excl_stor)
    if self.instance.disk_template == constants.DT_EXT:
      self._CheckMods("disk", self.op.disks, {}, ver_fn)
    else:
      self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
                      ver_fn)

    self.diskmod = _PrepareContainerMods(self.op.disks, None)

    # Check the validity of the `provider' parameter
    if self.instance.disk_template in constants.DT_EXT:
      for mod in self.diskmod:
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
        if mod[0] == constants.DDM_ADD:
          if ext_provider is None:
            raise errors.OpPrereqError("Instance template is '%s' and parameter"
                                       " '%s' missing, during disk add" %
                                       (constants.DT_EXT,
                                        constants.IDISK_PROVIDER),
                                       errors.ECODE_NOENT)
        elif mod[0] == constants.DDM_MODIFY:
          if ext_provider:
            raise errors.OpPrereqError("Parameter '%s' is invalid during disk"
                                       " modification" %
                                       constants.IDISK_PROVIDER,
                                       errors.ECODE_INVAL)
    else:
      for mod in self.diskmod:
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
        if ext_provider is not None:
          raise errors.OpPrereqError("Parameter '%s' is only valid for"
                                     " instances of type '%s'" %
                                     (constants.IDISK_PROVIDER,
                                      constants.DT_EXT),
                                     errors.ECODE_INVAL)

    if self.op.disks and self.instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Disk operations not supported for"
                                 " diskless instances", errors.ECODE_INVAL)

    def _PrepareDiskMod(_, disk, params, __):
      disk.name = params.get(constants.IDISK_NAME, None)

    # Verify disk changes (operating on a copy)
    disks = copy.deepcopy(self.instance.disks)
    _ApplyContainerMods("disk", disks, None, self.diskmod, None,
                        _PrepareDiskMod, None)
    utils.ValidateDeviceNames("disk", disks)
    if len(disks) > constants.MAX_DISKS:
      raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
                                 " more" % constants.MAX_DISKS,
                                 errors.ECODE_STATE)
    disk_sizes = [disk.size for disk in self.instance.disks]
    disk_sizes.extend(params["size"] for (op, idx, params, private) in
                      self.diskmod if op == constants.DDM_ADD)
    ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
    ispec[constants.ISPEC_DISK_SIZE] = disk_sizes

    if self.op.offline is not None and self.op.offline:
      CheckInstanceState(self, self.instance, CAN_CHANGE_INSTANCE_OFFLINE,
                         msg="can't change to offline")

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the instance list against the existing names.

    """
    assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    self.cluster = self.cfg.GetClusterInfo()

    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    pnode_uuid = self.instance.primary_node

    self.warn = []

    if (self.op.pnode_uuid is not None and self.op.pnode_uuid != pnode_uuid and
        not self.op.force):
      # verify that the instance is not up
      instance_info = self.rpc.call_instance_info(
          pnode_uuid, self.instance.name, self.instance.hypervisor,
          self.instance.hvparams)
      if instance_info.fail_msg:
        self.warn.append("Can't get instance runtime information: %s" %
                         instance_info.fail_msg)
      elif instance_info.payload:
        raise errors.OpPrereqError("Instance is still running on %s" %
                                   self.cfg.GetNodeName(pnode_uuid),
                                   errors.ECODE_STATE)

    assert pnode_uuid in self.owned_locks(locking.LEVEL_NODE)
    node_uuids = list(self.instance.all_nodes)
    pnode_info = self.cfg.GetNodeInfo(pnode_uuid)

    #_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
    assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
    group_info = self.cfg.GetNodeGroup(pnode_info.group)

    # dictionary with instance information after the modification
    ispec = {}

    # Prepare NIC modifications
    self.nicmod = _PrepareContainerMods(self.op.nics, _InstNicModPrivate)

    # OS change
    if self.op.os_name and not self.op.force:
      CheckNodeHasOS(self, self.instance.primary_node, self.op.os_name,
                     self.op.force_variant)
      instance_os = self.op.os_name
    else:
      instance_os = self.instance.os

    assert not (self.op.disk_template and self.op.disks), \
      "Can't modify disk template and apply disk changes at the same time"

    if self.op.disk_template:
      self._PreCheckDiskTemplate(pnode_info)

    self._PreCheckDisks(ispec)

    # hvparams processing
    if self.op.hvparams:
      hv_type = self.instance.hypervisor
      i_hvdict = GetUpdatedParams(self.instance.hvparams, self.op.hvparams)
      utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
      hv_new = self.cluster.SimpleFillHV(hv_type, self.instance.os, i_hvdict)

      # local check
      hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
      CheckHVParams(self, node_uuids, self.instance.hypervisor, hv_new)
      self.hv_proposed = self.hv_new = hv_new # the new actual values
      self.hv_inst = i_hvdict # the new dict (without defaults)
    else:
      self.hv_proposed = self.cluster.SimpleFillHV(self.instance.hypervisor,
                                                   self.instance.os,
                                                   self.instance.hvparams)
      self.hv_new = self.hv_inst = {}

    # beparams processing
    if self.op.beparams:
      i_bedict = GetUpdatedParams(self.instance.beparams, self.op.beparams,
                                  use_none=True)
      objects.UpgradeBeParams(i_bedict)
      utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
      be_new = self.cluster.SimpleFillBE(i_bedict)
      self.be_proposed = self.be_new = be_new # the new actual values
      self.be_inst = i_bedict # the new dict (without defaults)
    else:
      self.be_new = self.be_inst = {}
      self.be_proposed = self.cluster.SimpleFillBE(self.instance.beparams)
    be_old = self.cluster.FillBE(self.instance)

    # CPU param validation -- checking every time a parameter is
    # changed to cover all cases where either CPU mask or vcpus have
    # changed
    if (constants.BE_VCPUS in self.be_proposed and
        constants.HV_CPU_MASK in self.hv_proposed):
      cpu_list = \
        utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
      # Verify mask is consistent with number of vCPUs. Can skip this
      # test if only 1 entry in the CPU mask, which means same mask
      # is applied to all vCPUs.
      if (len(cpu_list) > 1 and
          len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
        raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
                                   " CPU mask [%s]" %
                                   (self.be_proposed[constants.BE_VCPUS],
                                    self.hv_proposed[constants.HV_CPU_MASK]),
                                   errors.ECODE_INVAL)

      # Only perform this test if a new CPU mask is given
      if constants.HV_CPU_MASK in self.hv_new:
        # Calculate the largest CPU number requested
        max_requested_cpu = max(map(max, cpu_list))
        # Check that all of the instance's nodes have enough physical CPUs to
        # satisfy the requested CPU mask
        hvspecs = [(self.instance.hypervisor,
                    self.cfg.GetClusterInfo()
                      .hvparams[self.instance.hypervisor])]
        _CheckNodesPhysicalCPUs(self, self.instance.all_nodes,
                                max_requested_cpu + 1,
                                hvspecs)

    # osparams processing
    if self.op.osparams:
      i_osdict = GetUpdatedParams(self.instance.osparams, self.op.osparams)
      CheckOSParams(self, True, node_uuids, instance_os, i_osdict)
      self.os_inst = i_osdict # the new dict (without defaults)
    else:
      self.os_inst = {}

    #TODO(dynmem): do the appropriate check involving MINMEM
    if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
        be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
      mem_check_list = [pnode_uuid]
      if be_new[constants.BE_AUTO_BALANCE]:
        # either we changed auto_balance to yes or it was from before
        mem_check_list.extend(self.instance.secondary_nodes)
      instance_info = self.rpc.call_instance_info(
          pnode_uuid, self.instance.name, self.instance.hypervisor,
          self.instance.hvparams)
      hvspecs = [(self.instance.hypervisor,
                  self.cluster.hvparams[self.instance.hypervisor])]
      nodeinfo = self.rpc.call_node_info(mem_check_list, None,
                                         hvspecs)
      pninfo = nodeinfo[pnode_uuid]
      msg = pninfo.fail_msg
      if msg:
        # Assume the primary node is unreachable and go ahead
        self.warn.append("Can't get info from primary node %s: %s" %
                         (self.cfg.GetNodeName(pnode_uuid), msg))
      else:
        (_, _, (pnhvinfo, )) = pninfo.payload
        if not isinstance(pnhvinfo.get("memory_free", None), int):
          self.warn.append("Node data from primary node %s doesn't contain"
                           " free memory information" %
                           self.cfg.GetNodeName(pnode_uuid))
        elif instance_info.fail_msg:
          self.warn.append("Can't get instance runtime information: %s" %
                           instance_info.fail_msg)
        else:
          if instance_info.payload:
            current_mem = int(instance_info.payload["memory"])
          else:
            # Assume instance not running
            # (there is a slight race condition here, but it's not very
            # probable, and we have no other way to check)
            # TODO: Describe race condition
            current_mem = 0
          #TODO(dynmem): do the appropriate check involving MINMEM
          miss_mem = (be_new[constants.BE_MAXMEM] - current_mem -
                      pnhvinfo["memory_free"])
          if miss_mem > 0:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from starting, due to %d MB of memory"
                                       " missing on its primary node" %
                                       miss_mem, errors.ECODE_NORES)

      if be_new[constants.BE_AUTO_BALANCE]:
        for node_uuid, nres in nodeinfo.items():
          if node_uuid not in self.instance.secondary_nodes:
            continue
          nres.Raise("Can't get info from secondary node %s" %
                     self.cfg.GetNodeName(node_uuid), prereq=True,
                     ecode=errors.ECODE_STATE)
          (_, _, (nhvinfo, )) = nres.payload
          if not isinstance(nhvinfo.get("memory_free", None), int):
            raise errors.OpPrereqError("Secondary node %s didn't return free"
                                       " memory information" %
                                       self.cfg.GetNodeName(node_uuid),
                                       errors.ECODE_STATE)
          #TODO(dynmem): do the appropriate check involving MINMEM
          elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from failover to its secondary node"
                                       " %s, due to not enough memory" %
                                       self.cfg.GetNodeName(node_uuid),
                                       errors.ECODE_STATE)

    if self.op.runtime_mem:
      remote_info = self.rpc.call_instance_info(
         self.instance.primary_node, self.instance.name,
         self.instance.hypervisor,
         self.cluster.hvparams[self.instance.hypervisor])
      remote_info.Raise("Error checking node %s" %
                        self.cfg.GetNodeName(self.instance.primary_node))
      if not remote_info.payload: # not running already
        raise errors.OpPrereqError("Instance %s is not running" %
                                   self.instance.name, errors.ECODE_STATE)

      current_memory = remote_info.payload["memory"]
      if (not self.op.force and
           (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
            self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
        raise errors.OpPrereqError("Instance %s must have memory between %d"
                                   " and %d MB of memory unless --force is"
                                   " given" %
                                   (self.instance.name,
                                    self.be_proposed[constants.BE_MINMEM],
                                    self.be_proposed[constants.BE_MAXMEM]),
                                   errors.ECODE_INVAL)

      delta = self.op.runtime_mem - current_memory
      if delta > 0:
        CheckNodeFreeMemory(
            self, self.instance.primary_node,
            "ballooning memory for instance %s" % self.instance.name, delta,
            self.instance.hypervisor,
            self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])

    # make self.cluster visible in the functions below
    cluster = self.cluster

    def _PrepareNicCreate(_, params, private):
      self._PrepareNicModification(params, private, None, None,
                                   {}, cluster, pnode_uuid)
      return (None, None)

    def _PrepareNicMod(_, nic, params, private):
      self._PrepareNicModification(params, private, nic.ip, nic.network,
                                   nic.nicparams, cluster, pnode_uuid)
      return None

    def _PrepareNicRemove(_, params, __):
      ip = params.ip
      net = params.network
      if net is not None and ip is not None:
        self.cfg.ReleaseIp(net, ip, self.proc.GetECId())

    # Verify NIC changes (operating on copy)
    nics = self.instance.nics[:]
    _ApplyContainerMods("NIC", nics, None, self.nicmod,
                        _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
    if len(nics) > constants.MAX_NICS:
      raise errors.OpPrereqError("Instance has too many network interfaces"
                                 " (%d), cannot add more" % constants.MAX_NICS,
                                 errors.ECODE_STATE)

    # Pre-compute NIC changes (necessary to use result in hooks)
    self._nic_chgdesc = []
    if self.nicmod:
      # Operate on copies as this is still in prereq
      nics = [nic.Copy() for nic in self.instance.nics]
      _ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
                          self._CreateNewNic, self._ApplyNicMods, None)
      # Verify that NIC names are unique and valid
      utils.ValidateDeviceNames("NIC", nics)
      self._new_nics = nics
      ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
    else:
      self._new_nics = None
      ispec[constants.ISPEC_NIC_COUNT] = len(self.instance.nics)

    if not self.op.ignore_ipolicy:
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
                                                              group_info)

      # Fill ispec with backend parameters
      ispec[constants.ISPEC_SPINDLE_USE] = \
        self.be_new.get(constants.BE_SPINDLE_USE, None)
      ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS,
                                                         None)

      # Copy ispec to verify parameters with min/max values separately
      if self.op.disk_template:
        new_disk_template = self.op.disk_template
      else:
        new_disk_template = self.instance.disk_template
      ispec_max = ispec.copy()
      ispec_max[constants.ISPEC_MEM_SIZE] = \
        self.be_new.get(constants.BE_MAXMEM, None)
      res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,
                                                     new_disk_template)
      ispec_min = ispec.copy()
      ispec_min[constants.ISPEC_MEM_SIZE] = \
        self.be_new.get(constants.BE_MINMEM, None)
      res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,
                                                     new_disk_template)

      if (res_max or res_min):
        # FIXME: Improve error message by including information about whether
        # the upper or lower limit of the parameter fails the ipolicy.
        msg = ("Instance allocation to group %s (%s) violates policy: %s" %
               (group_info, group_info.name,
                utils.CommaJoin(set(res_max + res_min))))
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

  def _ConvertPlainToDrbd(self, feedback_fn):
    """Converts an instance from plain to drbd.

    """
    feedback_fn("Converting template to drbd")
    pnode_uuid = self.instance.primary_node
    snode_uuid = self.op.remote_node_uuid

    assert self.instance.disk_template == constants.DT_PLAIN

    # create a fake disk info for _GenerateDiskTemplate
    disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
                  constants.IDISK_VG: d.logical_id[0],
                  constants.IDISK_NAME: d.name}
                 for d in self.instance.disks]
    new_disks = GenerateDiskTemplate(self, self.op.disk_template,
                                     self.instance.uuid, pnode_uuid,
                                     [snode_uuid], disk_info, None, None, 0,
                                     feedback_fn, self.diskparams)
    anno_disks = rpc.AnnotateDiskParams(constants.DT_DRBD8, new_disks,
                                        self.diskparams)
    p_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, pnode_uuid)
    s_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, snode_uuid)
    info = GetInstanceInfoText(self.instance)
    feedback_fn("Creating additional volumes...")
    # first, create the missing data and meta devices
    for disk in anno_disks:
      # unfortunately this is... not too nice
      CreateSingleBlockDev(self, pnode_uuid, self.instance, disk.children[1],
                           info, True, p_excl_stor)
      for child in disk.children:
        CreateSingleBlockDev(self, snode_uuid, self.instance, child, info, True,
                             s_excl_stor)
    # at this stage, all new LVs have been created, we can rename the
    # old ones
    feedback_fn("Renaming original volumes...")
    rename_list = [(o, n.children[0].logical_id)
                   for (o, n) in zip(self.instance.disks, new_disks)]
    result = self.rpc.call_blockdev_rename(pnode_uuid, rename_list)
    result.Raise("Failed to rename original LVs")

    feedback_fn("Initializing DRBD devices...")
    # all child devices are in place, we can now create the DRBD devices
    try:
      for disk in anno_disks:
        for (node_uuid, excl_stor) in [(pnode_uuid, p_excl_stor),
                                       (snode_uuid, s_excl_stor)]:
          f_create = node_uuid == pnode_uuid
          CreateSingleBlockDev(self, node_uuid, self.instance, disk, info,
                               f_create, excl_stor)
    except errors.GenericError, e:
      feedback_fn("Initializing of DRBD devices failed;"
                  " renaming back original volumes...")
      for disk in new_disks:
        self.cfg.SetDiskID(disk, pnode_uuid)
      rename_back_list = [(n.children[0], o.logical_id)
                          for (n, o) in zip(new_disks, self.instance.disks)]
      result = self.rpc.call_blockdev_rename(pnode_uuid, rename_back_list)
      result.Raise("Failed to rename LVs back after error %s" % str(e))
      raise

    # at this point, the instance has been modified
    self.instance.disk_template = constants.DT_DRBD8
    self.instance.disks = new_disks
    self.cfg.Update(self.instance, feedback_fn)

    # Release node locks while waiting for sync
    ReleaseLocks(self, locking.LEVEL_NODE)

    # disks are created, waiting for sync
    disk_abort = not WaitForSync(self, self.instance,
                                 oneshot=not self.op.wait_for_sync)
    if disk_abort:
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance, please cleanup manually")

    # Node resource locks will be released by caller

  def _ConvertDrbdToPlain(self, feedback_fn):
    """Converts an instance from drbd to plain.

    """
    assert len(self.instance.secondary_nodes) == 1
    assert self.instance.disk_template == constants.DT_DRBD8

    pnode_uuid = self.instance.primary_node
    snode_uuid = self.instance.secondary_nodes[0]
    feedback_fn("Converting template to plain")

    old_disks = AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
    new_disks = [d.children[0] for d in self.instance.disks]

    # copy over size, mode and name
    for parent, child in zip(old_disks, new_disks):
      child.size = parent.size
      child.mode = parent.mode
      child.name = parent.name

    # this is a DRBD disk, return its port to the pool
    # NOTE: this must be done right before the call to cfg.Update!
    for disk in old_disks:
      tcp_port = disk.logical_id[2]
      self.cfg.AddTcpUdpPort(tcp_port)

    # update instance structure
    self.instance.disks = new_disks
    self.instance.disk_template = constants.DT_PLAIN
    _UpdateIvNames(0, self.instance.disks)
    self.cfg.Update(self.instance, feedback_fn)

    # Release locks in case removing disks takes a while
    ReleaseLocks(self, locking.LEVEL_NODE)

    feedback_fn("Removing volumes on the secondary node...")
    for disk in old_disks:
      self.cfg.SetDiskID(disk, snode_uuid)
      msg = self.rpc.call_blockdev_remove(snode_uuid, disk).fail_msg
      if msg:
        self.LogWarning("Could not remove block device %s on node %s,"
                        " continuing anyway: %s", disk.iv_name,
                        self.cfg.GetNodeName(snode_uuid), msg)

    feedback_fn("Removing unneeded volumes on the primary node...")
    for idx, disk in enumerate(old_disks):
      meta = disk.children[1]
      self.cfg.SetDiskID(meta, pnode_uuid)
      msg = self.rpc.call_blockdev_remove(pnode_uuid, meta).fail_msg
      if msg:
        self.LogWarning("Could not remove metadata for disk %d on node %s,"
                        " continuing anyway: %s", idx,
                        self.cfg.GetNodeName(pnode_uuid), msg)

  def _CreateNewDisk(self, idx, params, _):
    """Creates a new disk.

    """
    # add a new disk
    if self.instance.disk_template in constants.DTS_FILEBASED:
      (file_driver, file_path) = self.instance.disks[0].logical_id
      file_path = os.path.dirname(file_path)
    else:
      file_driver = file_path = None

    disk = \
      GenerateDiskTemplate(self, self.instance.disk_template,
                           self.instance.uuid, self.instance.primary_node,
                           self.instance.secondary_nodes, [params], file_path,
                           file_driver, idx, self.Log, self.diskparams)[0]

    new_disks = CreateDisks(self, self.instance, disks=[disk])

    if self.cluster.prealloc_wipe_disks:
      # Wipe new disk
      WipeOrCleanupDisks(self, self.instance,
                         disks=[(idx, disk, 0)],
                         cleanup=new_disks)

    return (disk, [
      ("disk/%d" % idx, "add:size=%s,mode=%s" % (disk.size, disk.mode)),
      ])

  @staticmethod
  def _ModifyDisk(idx, disk, params, _):
    """Modifies a disk.

    """
    changes = []
    mode = params.get(constants.IDISK_MODE, None)
    if mode:
      disk.mode = mode
      changes.append(("disk.mode/%d" % idx, disk.mode))

    name = params.get(constants.IDISK_NAME, None)
    disk.name = name
    changes.append(("disk.name/%d" % idx, disk.name))

    return changes

  def _RemoveDisk(self, idx, root, _):
    """Removes a disk.

    """
    (anno_disk,) = AnnotateDiskParams(self.instance, [root], self.cfg)
    for node_uuid, disk in anno_disk.ComputeNodeTree(
                             self.instance.primary_node):
      self.cfg.SetDiskID(disk, node_uuid)
      msg = self.rpc.call_blockdev_remove(node_uuid, disk).fail_msg
      if msg:
        self.LogWarning("Could not remove disk/%d on node '%s': %s,"
                        " continuing anyway", idx,
                        self.cfg.GetNodeName(node_uuid), msg)

    # if this is a DRBD disk, return its port to the pool
    if root.dev_type in constants.LDS_DRBD:
      self.cfg.AddTcpUdpPort(root.logical_id[2])

  def _CreateNewNic(self, idx, params, private):
    """Creates data structure for a new network interface.

    """
    mac = params[constants.INIC_MAC]
    ip = params.get(constants.INIC_IP, None)
    net = params.get(constants.INIC_NETWORK, None)
    name = params.get(constants.INIC_NAME, None)
    net_uuid = self.cfg.LookupNetwork(net)
    #TODO: not private.filled?? can a nic have no nicparams??
    nicparams = private.filled
    nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name,
                       nicparams=nicparams)
    nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())

    return (nobj, [
      ("nic.%d" % idx,
       "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
       (mac, ip, private.filled[constants.NIC_MODE],
       private.filled[constants.NIC_LINK],
       net)),
      ])

  def _ApplyNicMods(self, idx, nic, params, private):
    """Modifies a network interface.

    """
    changes = []

    for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:
      if key in params:
        changes.append(("nic.%s/%d" % (key, idx), params[key]))
        setattr(nic, key, params[key])

    new_net = params.get(constants.INIC_NETWORK, nic.network)
    new_net_uuid = self.cfg.LookupNetwork(new_net)
    if new_net_uuid != nic.network:
      changes.append(("nic.network/%d" % idx, new_net))
      nic.network = new_net_uuid

    if private.filled:
      nic.nicparams = private.filled

      for (key, val) in nic.nicparams.items():
        changes.append(("nic.%s/%d" % (key, idx), val))

    return changes

  def Exec(self, feedback_fn):
    """Modifies an instance.

    All parameters take effect only at the next restart of the instance.

    """
    # Process here the warnings from CheckPrereq, as we don't have a
    # feedback_fn there.
    # TODO: Replace with self.LogWarning
    for warn in self.warn:
      feedback_fn("WARNING: %s" % warn)

    assert ((self.op.disk_template is None) ^
            bool(self.owned_locks(locking.LEVEL_NODE_RES))), \
      "Not owning any node resource locks"

    result = []

    # New primary node
    if self.op.pnode_uuid:
      self.instance.primary_node = self.op.pnode_uuid

    # runtime memory
    if self.op.runtime_mem:
      rpcres = self.rpc.call_instance_balloon_memory(self.instance.primary_node,
                                                     self.instance,
                                                     self.op.runtime_mem)
      rpcres.Raise("Cannot modify instance runtime memory")
      result.append(("runtime_memory", self.op.runtime_mem))

    # Apply disk changes
    _ApplyContainerMods("disk", self.instance.disks, result, self.diskmod,
                        self._CreateNewDisk, self._ModifyDisk,
                        self._RemoveDisk)
    _UpdateIvNames(0, self.instance.disks)

    if self.op.disk_template:
      if __debug__:
        check_nodes = set(self.instance.all_nodes)
        if self.op.remote_node_uuid:
          check_nodes.add(self.op.remote_node_uuid)
        for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
          owned = self.owned_locks(level)
          assert not (check_nodes - owned), \
            ("Not owning the correct locks, owning %r, expected at least %r" %
             (owned, check_nodes))

      r_shut = ShutdownInstanceDisks(self, self.instance)
      if not r_shut:
        raise errors.OpExecError("Cannot shutdown instance disks, unable to"
                                 " proceed with disk template conversion")
      mode = (self.instance.disk_template, self.op.disk_template)
      try:
        self._DISK_CONVERSIONS[mode](self, feedback_fn)
      except:
        self.cfg.ReleaseDRBDMinors(self.instance.uuid)
        raise
      result.append(("disk_template", self.op.disk_template))

      assert self.instance.disk_template == self.op.disk_template, \
        ("Expected disk template '%s', found '%s'" %
         (self.op.disk_template, self.instance.disk_template))

    # Release node and resource locks if there are any (they might already have
    # been released during disk conversion)
    ReleaseLocks(self, locking.LEVEL_NODE)
    ReleaseLocks(self, locking.LEVEL_NODE_RES)

    # Apply NIC changes
    if self._new_nics is not None:
      self.instance.nics = self._new_nics
      result.extend(self._nic_chgdesc)

    # hvparams changes
    if self.op.hvparams:
      self.instance.hvparams = self.hv_inst
      for key, val in self.op.hvparams.iteritems():
        result.append(("hv/%s" % key, val))

    # beparams changes
    if self.op.beparams:
      self.instance.beparams = self.be_inst
      for key, val in self.op.beparams.iteritems():
        result.append(("be/%s" % key, val))

    # OS change
    if self.op.os_name:
      self.instance.os = self.op.os_name

    # osparams changes
    if self.op.osparams:
      self.instance.osparams = self.os_inst
      for key, val in self.op.osparams.iteritems():
        result.append(("os/%s" % key, val))

    if self.op.offline is None:
      # Ignore
      pass
    elif self.op.offline:
      # Mark instance as offline
      self.cfg.MarkInstanceOffline(self.instance.uuid)
      result.append(("admin_state", constants.ADMINST_OFFLINE))
    else:
      # Mark instance as online, but stopped
      self.cfg.MarkInstanceDown(self.instance.uuid)
      result.append(("admin_state", constants.ADMINST_DOWN))

    self.cfg.Update(self.instance, feedback_fn, self.proc.GetECId())

    assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
                self.owned_locks(locking.LEVEL_NODE)), \
      "All node locks should have been released by now"

    return result

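  # Mapping of supported disk template conversions to their handlers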
  _DISK_CONVERSIONS = {
    (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
    (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
    }


class LUInstanceChangeGroup(LogicalUnit):
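  """Change the node group of an instance.

  The actual moves are computed by the iallocator and returned as jobs.

  """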
  HPATH = "instance-change-group"
3413
  HTYPE = constants.HTYPE_INSTANCE
3414
  REQ_BGL = False
3415

    
3416
  def ExpandNames(self):
    self.share_locks = ShareAll()

    self.needed_locks = {
      locking.LEVEL_NODEGROUP: [],
      locking.LEVEL_NODE: [],
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
      }

    self._ExpandAndLockInstance()

    if self.op.target_groups:
      self.req_target_uuids = map(self.cfg.LookupNodeGroup,
                                  self.op.target_groups)
    else:
      self.req_target_uuids = None

    self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]

      if self.req_target_uuids:
        lock_groups = set(self.req_target_uuids)

        # Lock all groups used by instance optimistically; this requires going
        # via the node before it's locked, requiring verification later on
        instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
        lock_groups.update(instance_groups)
      else:
        # No target groups, need to lock all of them
        lock_groups = locking.ALL_SET

      self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups

    elif level == locking.LEVEL_NODE:
      if self.req_target_uuids:
        # Lock all nodes used by instances
        self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
        self._LockInstancesNodes()

        # Lock all nodes in all potential target groups
        lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
                       self.cfg.GetInstanceNodeGroups(self.op.instance_uuid))
        member_nodes = [node_uuid
                        for group in lock_groups
                        for node_uuid in self.cfg.GetNodeGroup(group).members]
        self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
      else:
        # Lock all nodes as all groups are potential targets
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
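    """Check prerequisites and compute the set of possible target groups.

    """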
    owned_instance_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))

    assert (self.req_target_uuids is None or
            owned_groups.issuperset(self.req_target_uuids))
    assert owned_instance_names == set([self.op.instance_name])

    # Get instance information
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)

    # Check if node groups for locked instance are still correct
    assert owned_nodes.issuperset(self.instance.all_nodes), \
      ("Instance %s's nodes changed while we kept the lock" %
       self.op.instance_name)

    inst_groups = CheckInstanceNodeGroups(self.cfg, self.op.instance_uuid,
                                          owned_groups)

    if self.req_target_uuids:
      # User requested specific target groups
      self.target_uuids = frozenset(self.req_target_uuids)
    else:
      # All groups except those used by the instance are potential targets
      self.target_uuids = owned_groups - inst_groups

    conflicting_groups = self.target_uuids & inst_groups
    if conflicting_groups:
      raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
                                 " used by the instance '%s'" %
                                 (utils.CommaJoin(conflicting_groups),
                                  self.op.instance_name),
                                 errors.ECODE_INVAL)

    if not self.target_uuids:
      raise errors.OpPrereqError("There are no possible target groups",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    assert self.target_uuids

    env = {
      "TARGET_GROUPS": " ".join(self.target_uuids),
      }

    env.update(BuildInstanceHookEnvByObject(self, self.instance))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()
    return ([mn], [mn])

  def Exec(self, feedback_fn):
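    """Computes the instance moves via the iallocator and returns the jobs.

    """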
    instances = list(self.owned_locks(locking.LEVEL_INSTANCE))

    assert instances == [self.op.instance_name], "Instance not locked"

    req = iallocator.IAReqGroupChange(instances=instances,
                                      target_groups=list(self.target_uuids))
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute solution for changing group of"
                                 " instance '%s' using iallocator '%s': %s" %
                                 (self.op.instance_name, self.op.iallocator,
                                  ial.info), errors.ECODE_NORES)

    jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False)

    self.LogInfo("Iallocator returned %s job(s) for changing group of"
                 " instance '%s'", len(jobs), self.op.instance_name)

    return ResultWithJobs(jobs)