#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Logical units dealing with instances."""

import OpenSSL
import copy
import logging
import os

from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import ht
from ganeti import hypervisor
from ganeti import locking
from ganeti.masterd import iallocator
from ganeti import masterd
from ganeti import netutils
from ganeti import objects
from ganeti import opcodes
from ganeti import pathutils
from ganeti import rpc
from ganeti import utils

from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs

from ganeti.cmdlib.common import INSTANCE_DOWN, \
  INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, CheckNodeOnline, \
  ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
  LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
  IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
  AnnotateDiskParams, GetUpdatedParams, ExpandInstanceUuidAndName, \
  ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeUuidAndName, \
  CheckDiskTemplateEnabled
from ganeti.cmdlib.instance_storage import CreateDisks, \
  CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, WaitForSync, \
  IsExclusiveStorageEnabledNodeUuid, CreateSingleBlockDev, ComputeDisks, \
  CheckRADOSFreeSpace, ComputeDiskSizePerVG, GenerateDiskTemplate, \
  StartInstanceDisks, ShutdownInstanceDisks, AssembleInstanceDisks, \
  CheckSpindlesExclusiveStorage
from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
  GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
  NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
  ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
  GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
  CheckInstanceBridgesExist, CheckNicsBridgesExist, CheckNodeHasOS

import ganeti.masterd.instance


#: Type description for changes as returned by L{_ApplyContainerMods}'s
#: callbacks
_TApplyContModsCbChanges = \
  ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
    ht.TNonEmptyString,
    ht.TAny,
    ])))
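# For illustration only (hypothetical values): a callback result such as
# [("nic.0/mac", "00:11:22:33:44:55"), ("disk/1", None)] satisfies the check
# above (a maybe-list of 2-item entries whose first item is a non-empty
# string), whereas a bare string or a 3-tuple entry would be rejected.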
77

    
78

    
79
def _CheckHostnameSane(lu, name):
80
  """Ensures that a given hostname resolves to a 'sane' name.
81

82
  The given name is required to be a prefix of the resolved hostname,
83
  to prevent accidental mismatches.
84

85
  @param lu: the logical unit on behalf of which we're checking
86
  @param name: the name we should resolve and check
87
  @return: the resolved hostname object
88

89
  """
90
  hostname = netutils.GetHostname(name=name)
91
  if hostname.name != name:
92
    lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name)
93
  if not utils.MatchNameComponent(name, [hostname.name]):
94
    raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
95
                                " same as given hostname '%s'") %
96
                               (hostname.name, name), errors.ECODE_INVAL)
97
  return hostname
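# Example (hypothetical names): _CheckHostnameSane(lu, "inst1") accepts a
# resolution of "inst1" to "inst1.example.com" (the given name is a prefix
# component of the resolved one), but raises OpPrereqError if "inst1"
# resolves to an unrelated name such as "web42.example.com".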
98

    
99

    
100
def _CheckOpportunisticLocking(op):
101
  """Generate error if opportunistic locking is not possible.
102

103
  """
104
  if op.opportunistic_locking and not op.iallocator:
105
    raise errors.OpPrereqError("Opportunistic locking is only available in"
106
                               " combination with an instance allocator",
107
                               errors.ECODE_INVAL)
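# Example (illustrative only): an opcode with opportunistic_locking=True but
# no iallocator set is rejected here, because opportunistic locks only make
# sense when an allocator is free to pick among whatever nodes were locked.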
108

    
109

    
110
def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_name_whitelist):
111
  """Wrapper around IAReqInstanceAlloc.
112

113
  @param op: The instance opcode
114
  @param disks: The computed disks
115
  @param nics: The computed nics
116
  @param beparams: The fully filled beparams
117
  @param node_name_whitelist: List of nodes which should appear as online to the
118
    allocator (unless the node is already marked offline)
119

120
  @returns: A filled L{iallocator.IAReqInstanceAlloc}
121

122
  """
123
  spindle_use = beparams[constants.BE_SPINDLE_USE]
124
  return iallocator.IAReqInstanceAlloc(name=op.instance_name,
125
                                       disk_template=op.disk_template,
126
                                       tags=op.tags,
127
                                       os=op.os_type,
128
                                       vcpus=beparams[constants.BE_VCPUS],
129
                                       memory=beparams[constants.BE_MAXMEM],
130
                                       spindle_use=spindle_use,
131
                                       disks=disks,
132
                                       nics=[n.ToDict() for n in nics],
133
                                       hypervisor=op.hypervisor,
134
                                       node_whitelist=node_name_whitelist)
135

    
136

    
137
def _ComputeFullBeParams(op, cluster):
138
  """Computes the full beparams.
139

140
  @param op: The instance opcode
141
  @param cluster: The cluster config object
142

143
  @return: The fully filled beparams
144

145
  """
146
  default_beparams = cluster.beparams[constants.PP_DEFAULT]
147
  for param, value in op.beparams.iteritems():
148
    if value == constants.VALUE_AUTO:
149
      op.beparams[param] = default_beparams[param]
150
  objects.UpgradeBeParams(op.beparams)
151
  utils.ForceDictType(op.beparams, constants.BES_PARAMETER_TYPES)
152
  return cluster.SimpleFillBE(op.beparams)
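# A minimal sketch of the effect (hypothetical values): with op.beparams
# {"vcpus": "auto", "maxmem": 512} and a cluster default of vcpus=2, the
# "auto" entry is replaced by 2 before the dict is type-checked and merged
# with the cluster defaults via SimpleFillBE.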
153

    
154

    
155
def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
156
  """Computes the nics.
157

158
  @param op: The instance opcode
159
  @param cluster: Cluster configuration object
160
  @param default_ip: The default ip to assign
161
  @param cfg: An instance of the configuration object
162
  @param ec_id: Execution context ID
163

164
  @returns: The built-up NICs
165

166
  """
167
  nics = []
168
  for nic in op.nics:
169
    nic_mode_req = nic.get(constants.INIC_MODE, None)
170
    nic_mode = nic_mode_req
171
    if nic_mode is None or nic_mode == constants.VALUE_AUTO:
172
      nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]
173

    
174
    net = nic.get(constants.INIC_NETWORK, None)
175
    link = nic.get(constants.NIC_LINK, None)
176
    ip = nic.get(constants.INIC_IP, None)
177

    
178
    if net is None or net.lower() == constants.VALUE_NONE:
179
      net = None
180
    else:
181
      if nic_mode_req is not None or link is not None:
182
        raise errors.OpPrereqError("If network is given, no mode or link"
183
                                   " is allowed to be passed",
184
                                   errors.ECODE_INVAL)
185

    
186
    # ip validity checks
187
    if ip is None or ip.lower() == constants.VALUE_NONE:
188
      nic_ip = None
189
    elif ip.lower() == constants.VALUE_AUTO:
190
      if not op.name_check:
191
        raise errors.OpPrereqError("IP address set to auto but name checks"
192
                                   " have been skipped",
193
                                   errors.ECODE_INVAL)
194
      nic_ip = default_ip
195
    else:
196
      # We defer pool operations until later, so that the iallocator has
      # filled in the instance's node(s)
198
      if ip.lower() == constants.NIC_IP_POOL:
199
        if net is None:
200
          raise errors.OpPrereqError("if ip=pool, parameter network"
201
                                     " must be passed too",
202
                                     errors.ECODE_INVAL)
203

    
204
      elif not netutils.IPAddress.IsValid(ip):
205
        raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
206
                                   errors.ECODE_INVAL)
207

    
208
      nic_ip = ip
209

    
210
    # TODO: check the ip address for uniqueness
211
    if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
212
      raise errors.OpPrereqError("Routed nic mode requires an ip address",
213
                                 errors.ECODE_INVAL)
214

    
215
    # MAC address verification
216
    mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
217
    if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
218
      mac = utils.NormalizeAndValidateMac(mac)
219

    
220
      try:
221
        # TODO: We need to factor this out
222
        cfg.ReserveMAC(mac, ec_id)
223
      except errors.ReservationError:
224
        raise errors.OpPrereqError("MAC address %s already in use"
225
                                   " in cluster" % mac,
226
                                   errors.ECODE_NOTUNIQUE)
227

    
228
    #  Build nic parameters
229
    nicparams = {}
230
    if nic_mode_req:
231
      nicparams[constants.NIC_MODE] = nic_mode
232
    if link:
233
      nicparams[constants.NIC_LINK] = link
234

    
235
    check_params = cluster.SimpleFillNIC(nicparams)
236
    objects.NIC.CheckParameterSyntax(check_params)
237
    net_uuid = cfg.LookupNetwork(net)
238
    name = nic.get(constants.INIC_NAME, None)
239
    if name is not None and name.lower() == constants.VALUE_NONE:
240
      name = None
241
    nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name,
242
                          network=net_uuid, nicparams=nicparams)
243
    nic_obj.uuid = cfg.GenerateUniqueID(ec_id)
244
    nics.append(nic_obj)
245

    
246
  return nics
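# Rough summary of the IP handling above (not exhaustive): ip=None/"none"
# yields no address, ip="auto" reuses the default (name-resolved) IP and
# requires name_check, ip="pool" defers allocation to the network's address
# pool (and requires a network), and any other value must be a valid address.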
247

    
248

    
249
def _CheckForConflictingIp(lu, ip, node_uuid):
250
  """In case of conflicting IP address raise error.
251

252
  @type ip: string
253
  @param ip: IP address
254
  @type node_uuid: string
255
  @param node_uuid: node UUID
256

257
  """
258
  (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node_uuid)
259
  if conf_net is not None:
260
    raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
261
                                " network %s, but the target NIC does not." %
262
                                (ip, conf_net)),
263
                               errors.ECODE_STATE)
264

    
265
  return (None, None)
266

    
267

    
268
def _ComputeIPolicyInstanceSpecViolation(
269
  ipolicy, instance_spec, disk_template,
270
  _compute_fn=ComputeIPolicySpecViolation):
271
  """Compute if instance specs meets the specs of ipolicy.
272

273
  @type ipolicy: dict
274
  @param ipolicy: The ipolicy to verify against
275
  @type instance_spec: dict
  @param instance_spec: The instance spec to verify
277
  @type disk_template: string
278
  @param disk_template: the disk template of the instance
279
  @param _compute_fn: The function to verify ipolicy (unittest only)
280
  @see: L{ComputeIPolicySpecViolation}
281

282
  """
283
  mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
284
  cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
285
  disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
286
  disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
287
  nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
288
  spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)
289

    
290
  return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
291
                     disk_sizes, spindle_use, disk_template)
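# Example (illustrative): an instance_spec containing ISPEC_MEM_SIZE=512,
# ISPEC_CPU_COUNT=1 and ISPEC_DISK_SIZE=[1024] is checked against the
# ipolicy bounds; keys that are absent fall back to None (or 0/[] for the
# disk and NIC entries), as handled above.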
292

    
293

    
294
def _CheckOSVariant(os_obj, name):
295
  """Check whether an OS name conforms to the os variants specification.
296

297
  @type os_obj: L{objects.OS}
298
  @param os_obj: OS object to check
299
  @type name: string
300
  @param name: OS name passed by the user, to check for validity
301

302
  """
303
  variant = objects.OS.GetVariant(name)
304
  if not os_obj.supported_variants:
305
    if variant:
306
      raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
307
                                 " passed)" % (os_obj.name, variant),
308
                                 errors.ECODE_INVAL)
309
    return
310
  if not variant:
311
    raise errors.OpPrereqError("OS name must include a variant",
312
                               errors.ECODE_INVAL)
313

    
314
  if variant not in os_obj.supported_variants:
315
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)
316

    
317

    
318
class LUInstanceCreate(LogicalUnit):
319
  """Create an instance.
320

321
  """
322
  HPATH = "instance-add"
323
  HTYPE = constants.HTYPE_INSTANCE
324
  REQ_BGL = False
325

    
326
  def _CheckDiskTemplateValid(self):
327
    """Checks validity of disk template.
328

329
    """
330
    cluster = self.cfg.GetClusterInfo()
331
    if self.op.disk_template is None:
332
      # FIXME: It would be better to take the default disk template from the
333
      # ipolicy, but for the ipolicy we need the primary node, which we get from
334
      # the iallocator, which wants the disk template as input. To solve this
335
      # chicken-and-egg problem, it should be possible to specify just a node
336
      # group from the iallocator and take the ipolicy from that.
337
      self.op.disk_template = cluster.enabled_disk_templates[0]
338
    CheckDiskTemplateEnabled(cluster, self.op.disk_template)
339

    
340
  def _CheckDiskArguments(self):
341
    """Checks validity of disk-related arguments.
342

343
    """
344
    # check that disk's names are unique and valid
345
    utils.ValidateDeviceNames("disk", self.op.disks)
346

    
347
    self._CheckDiskTemplateValid()
348

    
349
    # check disks. parameter names and consistent adopt/no-adopt strategy
350
    has_adopt = has_no_adopt = False
351
    for disk in self.op.disks:
352
      if self.op.disk_template != constants.DT_EXT:
353
        utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
354
      if constants.IDISK_ADOPT in disk:
355
        has_adopt = True
356
      else:
357
        has_no_adopt = True
358
    if has_adopt and has_no_adopt:
359
      raise errors.OpPrereqError("Either all disks are adopted or none is",
360
                                 errors.ECODE_INVAL)
361
    if has_adopt:
362
      if self.op.disk_template not in constants.DTS_MAY_ADOPT:
363
        raise errors.OpPrereqError("Disk adoption is not supported for the"
364
                                   " '%s' disk template" %
365
                                   self.op.disk_template,
366
                                   errors.ECODE_INVAL)
367
      if self.op.iallocator is not None:
368
        raise errors.OpPrereqError("Disk adoption not allowed with an"
369
                                   " iallocator script", errors.ECODE_INVAL)
370
      if self.op.mode == constants.INSTANCE_IMPORT:
371
        raise errors.OpPrereqError("Disk adoption not allowed for"
372
                                   " instance import", errors.ECODE_INVAL)
373
    else:
374
      if self.op.disk_template in constants.DTS_MUST_ADOPT:
375
        raise errors.OpPrereqError("Disk template %s requires disk adoption,"
376
                                   " but no 'adopt' parameter given" %
377
                                   self.op.disk_template,
378
                                   errors.ECODE_INVAL)
379

    
380
    self.adopt_disks = has_adopt
381

    
382
  def CheckArguments(self):
383
    """Check arguments.
384

385
    """
386
    # do not require name_check to ease forward/backward compatibility
387
    # for tools
388
    if self.op.no_install and self.op.start:
389
      self.LogInfo("No-installation mode selected, disabling startup")
390
      self.op.start = False
391
    # validate/normalize the instance name
392
    self.op.instance_name = \
393
      netutils.Hostname.GetNormalizedName(self.op.instance_name)
394

    
395
    if self.op.ip_check and not self.op.name_check:
396
      # TODO: make the ip check more flexible and not depend on the name check
397
      raise errors.OpPrereqError("Cannot do IP address check without a name"
398
                                 " check", errors.ECODE_INVAL)
399

    
400
    # check nics' parameter names
401
    for nic in self.op.nics:
402
      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
403
    # check that NIC's parameters names are unique and valid
404
    utils.ValidateDeviceNames("NIC", self.op.nics)
405

    
406
    self._CheckDiskArguments()
407

    
408
    # instance name verification
409
    if self.op.name_check:
410
      self.hostname = _CheckHostnameSane(self, self.op.instance_name)
411
      self.op.instance_name = self.hostname.name
412
      # used in CheckPrereq for ip ping check
413
      self.check_ip = self.hostname.ip
414
    else:
415
      self.check_ip = None
416

    
417
    # file storage checks
418
    if (self.op.file_driver and
419
        not self.op.file_driver in constants.FILE_DRIVER):
420
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
421
                                 self.op.file_driver, errors.ECODE_INVAL)
422

    
423
    ### Node/iallocator related checks
424
    CheckIAllocatorOrNode(self, "iallocator", "pnode")
425

    
426
    if self.op.pnode is not None:
427
      if self.op.disk_template in constants.DTS_INT_MIRROR:
428
        if self.op.snode is None:
429
          raise errors.OpPrereqError("The networked disk templates need"
430
                                     " a mirror node", errors.ECODE_INVAL)
431
      elif self.op.snode:
432
        self.LogWarning("Secondary node will be ignored on non-mirrored disk"
433
                        " template")
434
        self.op.snode = None
435

    
436
    _CheckOpportunisticLocking(self.op)
437

    
438
    self._cds = GetClusterDomainSecret()
439

    
440
    if self.op.mode == constants.INSTANCE_IMPORT:
441
      # On import force_variant must be True, because if we forced it at
442
      # initial install, our only chance when importing it back is that it
443
      # works again!
444
      self.op.force_variant = True
445

    
446
      if self.op.no_install:
447
        self.LogInfo("No-installation mode has no effect during import")
448

    
449
    elif self.op.mode == constants.INSTANCE_CREATE:
450
      if self.op.os_type is None:
451
        raise errors.OpPrereqError("No guest OS specified",
452
                                   errors.ECODE_INVAL)
453
      if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
454
        raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
455
                                   " installation" % self.op.os_type,
456
                                   errors.ECODE_STATE)
457
      if self.op.disk_template is None:
458
        raise errors.OpPrereqError("No disk template specified",
459
                                   errors.ECODE_INVAL)
460

    
461
    elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
462
      # Check handshake to ensure both clusters have the same domain secret
463
      src_handshake = self.op.source_handshake
464
      if not src_handshake:
465
        raise errors.OpPrereqError("Missing source handshake",
466
                                   errors.ECODE_INVAL)
467

    
468
      errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
469
                                                           src_handshake)
470
      if errmsg:
471
        raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
472
                                   errors.ECODE_INVAL)
473

    
474
      # Load and check source CA
475
      self.source_x509_ca_pem = self.op.source_x509_ca
476
      if not self.source_x509_ca_pem:
477
        raise errors.OpPrereqError("Missing source X509 CA",
478
                                   errors.ECODE_INVAL)
479

    
480
      try:
481
        (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
482
                                                    self._cds)
483
      except OpenSSL.crypto.Error, err:
484
        raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
485
                                   (err, ), errors.ECODE_INVAL)
486

    
487
      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
488
      if errcode is not None:
489
        raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
490
                                   errors.ECODE_INVAL)
491

    
492
      self.source_x509_ca = cert
493

    
494
      src_instance_name = self.op.source_instance_name
495
      if not src_instance_name:
496
        raise errors.OpPrereqError("Missing source instance name",
497
                                   errors.ECODE_INVAL)
498

    
499
      self.source_instance_name = \
500
        netutils.GetHostname(name=src_instance_name).name
501

    
502
    else:
503
      raise errors.OpPrereqError("Invalid instance creation mode %r" %
504
                                 self.op.mode, errors.ECODE_INVAL)
505

    
506
  def ExpandNames(self):
507
    """ExpandNames for CreateInstance.
508

509
    Figure out the right locks for instance creation.
510

511
    """
512
    self.needed_locks = {}
513

    
514
    # this is just a preventive check, but someone might still add this
515
    # instance in the meantime, and creation will fail at lock-add time
516
    if self.op.instance_name in\
517
      [inst.name for inst in self.cfg.GetAllInstancesInfo().values()]:
518
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
519
                                 self.op.instance_name, errors.ECODE_EXISTS)
520

    
521
    self.add_locks[locking.LEVEL_INSTANCE] = self.op.instance_name
522

    
523
    if self.op.iallocator:
524
      # TODO: Find a solution to not lock all nodes in the cluster, e.g. by
525
      # specifying a group on instance creation and then selecting nodes from
526
      # that group
527
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
528
      self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
529

    
530
      if self.op.opportunistic_locking:
531
        self.opportunistic_locks[locking.LEVEL_NODE] = True
532
        self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
533
    else:
534
      (self.op.pnode_uuid, self.op.pnode) = \
535
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
536
      nodelist = [self.op.pnode_uuid]
537
      if self.op.snode is not None:
538
        (self.op.snode_uuid, self.op.snode) = \
539
          ExpandNodeUuidAndName(self.cfg, self.op.snode_uuid, self.op.snode)
540
        nodelist.append(self.op.snode_uuid)
541
      self.needed_locks[locking.LEVEL_NODE] = nodelist
542

    
543
    # in case of import lock the source node too
544
    if self.op.mode == constants.INSTANCE_IMPORT:
545
      src_node = self.op.src_node
546
      src_path = self.op.src_path
547

    
548
      if src_path is None:
549
        self.op.src_path = src_path = self.op.instance_name
550

    
551
      if src_node is None:
552
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
553
        self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
554
        self.op.src_node = None
555
        if os.path.isabs(src_path):
556
          raise errors.OpPrereqError("Importing an instance from a path"
557
                                     " requires a source node option",
558
                                     errors.ECODE_INVAL)
559
      else:
560
        (self.op.src_node_uuid, self.op.src_node) = (_, src_node) = \
561
          ExpandNodeUuidAndName(self.cfg, self.op.src_node_uuid, src_node)
562
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
563
          self.needed_locks[locking.LEVEL_NODE].append(self.op.src_node_uuid)
564
        if not os.path.isabs(src_path):
565
          self.op.src_path = \
566
            utils.PathJoin(pathutils.EXPORT_DIR, src_path)
567

    
568
    self.needed_locks[locking.LEVEL_NODE_RES] = \
569
      CopyLockList(self.needed_locks[locking.LEVEL_NODE])
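  # Note: LEVEL_NODE_RES mirrors the LEVEL_NODE locks computed above, so
  # that node resource usage (e.g. disk space) stays consistent on exactly
  # the nodes this creation may touch; CopyLockList is assumed here to just
  # produce an independent copy of the computed lock list.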
570

    
571
  def _RunAllocator(self):
572
    """Run the allocator based on input opcode.
573

574
    """
575
    if self.op.opportunistic_locking:
576
      # Only consider nodes for which a lock is held
577
      node_name_whitelist = self.cfg.GetNodeNames(
578
        self.owned_locks(locking.LEVEL_NODE))
579
    else:
580
      node_name_whitelist = None
581

    
582
    #TODO Export network to iallocator so that it chooses a pnode
583
    #     in a nodegroup that has the desired network connected to
584
    req = _CreateInstanceAllocRequest(self.op, self.disks,
585
                                      self.nics, self.be_full,
586
                                      node_name_whitelist)
587
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)
588

    
589
    ial.Run(self.op.iallocator)
590

    
591
    if not ial.success:
592
      # When opportunistic locks are used only a temporary failure is generated
593
      if self.op.opportunistic_locking:
594
        ecode = errors.ECODE_TEMP_NORES
595
      else:
596
        ecode = errors.ECODE_NORES
597

    
598
      raise errors.OpPrereqError("Can't compute nodes using"
599
                                 " iallocator '%s': %s" %
600
                                 (self.op.iallocator, ial.info),
601
                                 ecode)
602

    
603
    (self.op.pnode_uuid, self.op.pnode) = \
604
      ExpandNodeUuidAndName(self.cfg, None, ial.result[0])
605
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
606
                 self.op.instance_name, self.op.iallocator,
607
                 utils.CommaJoin(ial.result))
608

    
609
    assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator"
610

    
611
    if req.RequiredNodes() == 2:
612
      (self.op.snode_uuid, self.op.snode) = \
613
        ExpandNodeUuidAndName(self.cfg, None, ial.result[1])
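  # Illustrative outcome: for a single-node disk template the allocator
  # returns one node name (the primary only), while for DTS_INT_MIRROR
  # templates such as DRBD it returns two, the second becoming the
  # secondary node.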
614

    
615
  def BuildHooksEnv(self):
616
    """Build hooks env.
617

618
    This runs on master, primary and secondary nodes of the instance.
619

620
    """
621
    env = {
622
      "ADD_MODE": self.op.mode,
623
      }
624
    if self.op.mode == constants.INSTANCE_IMPORT:
625
      env["SRC_NODE"] = self.op.src_node
626
      env["SRC_PATH"] = self.op.src_path
627
      env["SRC_IMAGES"] = self.src_images
628

    
629
    env.update(BuildInstanceHookEnv(
630
      name=self.op.instance_name,
631
      primary_node_name=self.op.pnode,
632
      secondary_node_names=self.cfg.GetNodeNames(self.secondaries),
633
      status=self.op.start,
634
      os_type=self.op.os_type,
635
      minmem=self.be_full[constants.BE_MINMEM],
636
      maxmem=self.be_full[constants.BE_MAXMEM],
637
      vcpus=self.be_full[constants.BE_VCPUS],
638
      nics=NICListToTuple(self, self.nics),
639
      disk_template=self.op.disk_template,
640
      disks=[(d[constants.IDISK_NAME], d.get("uuid", ""),
641
              d[constants.IDISK_SIZE], d[constants.IDISK_MODE])
642
             for d in self.disks],
643
      bep=self.be_full,
644
      hvp=self.hv_full,
645
      hypervisor_name=self.op.hypervisor,
646
      tags=self.op.tags,
647
      ))
648

    
649
    return env
650

    
651
  def BuildHooksNodes(self):
652
    """Build hooks nodes.
653

654
    """
655
    nl = [self.cfg.GetMasterNode(), self.op.pnode_uuid] + self.secondaries
656
    return nl, nl
657

    
658
  def _ReadExportInfo(self):
659
    """Reads the export information from disk.
660

661
    It will override the opcode source node and path with the actual
662
    information, if these two were not specified before.
663

664
    @return: the export information
665

666
    """
667
    assert self.op.mode == constants.INSTANCE_IMPORT
668

    
669
    if self.op.src_node_uuid is None:
670
      locked_nodes = self.owned_locks(locking.LEVEL_NODE)
671
      exp_list = self.rpc.call_export_list(locked_nodes)
672
      found = False
673
      for node_uuid in exp_list:
674
        if exp_list[node_uuid].fail_msg:
675
          continue
676
        if self.op.src_path in exp_list[node_uuid].payload:
677
          found = True
678
          self.op.src_node = self.cfg.GetNodeInfo(node_uuid).name
679
          self.op.src_node_uuid = node_uuid
680
          self.op.src_path = utils.PathJoin(pathutils.EXPORT_DIR,
681
                                            self.op.src_path)
682
          break
683
      if not found:
684
        raise errors.OpPrereqError("No export found for relative path %s" %
685
                                   self.op.src_path, errors.ECODE_INVAL)
686

    
687
    CheckNodeOnline(self, self.op.src_node_uuid)
688
    result = self.rpc.call_export_info(self.op.src_node_uuid, self.op.src_path)
689
    result.Raise("No export or invalid export found in dir %s" %
690
                 self.op.src_path)
691

    
692
    export_info = objects.SerializableConfigParser.Loads(str(result.payload))
693
    if not export_info.has_section(constants.INISECT_EXP):
694
      raise errors.ProgrammerError("Corrupted export config",
695
                                   errors.ECODE_ENVIRON)
696

    
697
    ei_version = export_info.get(constants.INISECT_EXP, "version")
698
    if int(ei_version) != constants.EXPORT_VERSION:
699
      raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
700
                                 (ei_version, constants.EXPORT_VERSION),
701
                                 errors.ECODE_ENVIRON)
702
    return export_info
703

    
704
  def _ReadExportParams(self, einfo):
705
    """Use export parameters as defaults.
706

707
    If the opcode doesn't specify (i.e. override) some instance
    parameters, try to use them from the export information, if
    that declares them.
710

711
    """
712
    self.op.os_type = einfo.get(constants.INISECT_EXP, "os")
713

    
714
    if not self.op.disks:
715
      disks = []
716
      # TODO: import the disk iv_name too
717
      for idx in range(constants.MAX_DISKS):
718
        if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
719
          disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
720
          disks.append({constants.IDISK_SIZE: disk_sz})
721
      self.op.disks = disks
722
      if not disks and self.op.disk_template != constants.DT_DISKLESS:
723
        raise errors.OpPrereqError("No disk info specified and the export"
724
                                   " is missing the disk information",
725
                                   errors.ECODE_INVAL)
726

    
727
    if not self.op.nics:
728
      nics = []
729
      for idx in range(constants.MAX_NICS):
730
        if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
731
          ndict = {}
732
          for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
733
            nic_param_name = "nic%d_%s" % (idx, name)
734
            if einfo.has_option(constants.INISECT_INS, nic_param_name):
735
              v = einfo.get(constants.INISECT_INS, nic_param_name)
736
              ndict[name] = v
737
          nics.append(ndict)
738
        else:
739
          break
740
      self.op.nics = nics
741

    
742
    if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
743
      self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()
744

    
745
    if (self.op.hypervisor is None and
746
        einfo.has_option(constants.INISECT_INS, "hypervisor")):
747
      self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")
748

    
749
    if einfo.has_section(constants.INISECT_HYP):
750
      # use the export parameters but do not override the ones
751
      # specified by the user
752
      for name, value in einfo.items(constants.INISECT_HYP):
753
        if name not in self.op.hvparams:
754
          self.op.hvparams[name] = value
755

    
756
    if einfo.has_section(constants.INISECT_BEP):
757
      # use the parameters, without overriding
758
      for name, value in einfo.items(constants.INISECT_BEP):
759
        if name not in self.op.beparams:
760
          self.op.beparams[name] = value
761
        # Compatibility for the old "memory" be param
762
        if name == constants.BE_MEMORY:
763
          if constants.BE_MAXMEM not in self.op.beparams:
764
            self.op.beparams[constants.BE_MAXMEM] = value
765
          if constants.BE_MINMEM not in self.op.beparams:
766
            self.op.beparams[constants.BE_MINMEM] = value
767
    else:
768
      # try to read the parameters old style, from the main section
769
      for name in constants.BES_PARAMETERS:
770
        if (name not in self.op.beparams and
771
            einfo.has_option(constants.INISECT_INS, name)):
772
          self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)
773

    
774
    if einfo.has_section(constants.INISECT_OSP):
775
      # use the parameters, without overriding
776
      for name, value in einfo.items(constants.INISECT_OSP):
777
        if name not in self.op.osparams:
778
          self.op.osparams[name] = value
779

    
780
  def _RevertToDefaults(self, cluster):
781
    """Revert the instance parameters to the default values.
782

783
    """
784
    # hvparams
785
    hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
786
    for name in self.op.hvparams.keys():
787
      if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
788
        del self.op.hvparams[name]
789
    # beparams
790
    be_defs = cluster.SimpleFillBE({})
791
    for name in self.op.beparams.keys():
792
      if name in be_defs and be_defs[name] == self.op.beparams[name]:
793
        del self.op.beparams[name]
794
    # nic params
795
    nic_defs = cluster.SimpleFillNIC({})
796
    for nic in self.op.nics:
797
      for name in constants.NICS_PARAMETERS:
798
        if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
799
          del nic[name]
800
    # osparams
801
    os_defs = cluster.SimpleFillOS(self.op.os_type, {})
802
    for name in self.op.osparams.keys():
803
      if name in os_defs and os_defs[name] == self.op.osparams[name]:
804
        del self.op.osparams[name]
805

    
806
  def _CalculateFileStorageDir(self):
807
    """Calculate final instance file storage dir.
808

809
    """
810
    # file storage dir calculation/check
811
    self.instance_file_storage_dir = None
812
    if self.op.disk_template in constants.DTS_FILEBASED:
813
      # build the full file storage dir path
814
      joinargs = []
815

    
816
      if self.op.disk_template == constants.DT_SHARED_FILE:
817
        get_fsd_fn = self.cfg.GetSharedFileStorageDir
818
      else:
819
        get_fsd_fn = self.cfg.GetFileStorageDir
820

    
821
      cfg_storagedir = get_fsd_fn()
822
      if not cfg_storagedir:
823
        raise errors.OpPrereqError("Cluster file storage dir not defined",
824
                                   errors.ECODE_STATE)
825
      joinargs.append(cfg_storagedir)
826

    
827
      if self.op.file_storage_dir is not None:
828
        joinargs.append(self.op.file_storage_dir)
829

    
830
      joinargs.append(self.op.instance_name)
831

    
832
      # pylint: disable=W0142
833
      self.instance_file_storage_dir = utils.PathJoin(*joinargs)
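  # Sketch of the resulting path (hypothetical directories): with a cluster
  # file storage dir of "/srv/ganeti/file-storage", an optional
  # file_storage_dir of "mysubdir" and instance "inst1.example.com", the
  # final dir would be "/srv/ganeti/file-storage/mysubdir/inst1.example.com".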
834

    
835
  def CheckPrereq(self): # pylint: disable=R0914
836
    """Check prerequisites.
837

838
    """
839
    self._CalculateFileStorageDir()
840

    
841
    if self.op.mode == constants.INSTANCE_IMPORT:
842
      export_info = self._ReadExportInfo()
843
      self._ReadExportParams(export_info)
844
      self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
845
    else:
846
      self._old_instance_name = None
847

    
848
    if (not self.cfg.GetVGName() and
849
        self.op.disk_template not in constants.DTS_NOT_LVM):
850
      raise errors.OpPrereqError("Cluster does not support lvm-based"
851
                                 " instances", errors.ECODE_STATE)
852

    
853
    if (self.op.hypervisor is None or
854
        self.op.hypervisor == constants.VALUE_AUTO):
855
      self.op.hypervisor = self.cfg.GetHypervisorType()
856

    
857
    cluster = self.cfg.GetClusterInfo()
858
    enabled_hvs = cluster.enabled_hypervisors
859
    if self.op.hypervisor not in enabled_hvs:
860
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
861
                                 " cluster (%s)" %
862
                                 (self.op.hypervisor, ",".join(enabled_hvs)),
863
                                 errors.ECODE_STATE)
864

    
865
    # Check tag validity
866
    for tag in self.op.tags:
867
      objects.TaggableObject.ValidateTag(tag)
868

    
869
    # check hypervisor parameter syntax (locally)
870
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
871
    filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
872
                                      self.op.hvparams)
873
    hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
874
    hv_type.CheckParameterSyntax(filled_hvp)
875
    self.hv_full = filled_hvp
876
    # check that we don't specify global parameters on an instance
877
    CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
878
                         "instance", "cluster")
879

    
880
    # fill and remember the beparams dict
881
    self.be_full = _ComputeFullBeParams(self.op, cluster)
882

    
883
    # build os parameters
884
    self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)
885

    
886
    # now that hvp/bep are in final format, let's reset to defaults,
887
    # if told to do so
888
    if self.op.identify_defaults:
889
      self._RevertToDefaults(cluster)
890

    
891
    # NIC buildup
892
    self.nics = _ComputeNics(self.op, cluster, self.check_ip, self.cfg,
893
                             self.proc.GetECId())
894

    
895
    # disk checks/pre-build
896
    default_vg = self.cfg.GetVGName()
897
    self.disks = ComputeDisks(self.op, default_vg)
898

    
899
    if self.op.mode == constants.INSTANCE_IMPORT:
900
      disk_images = []
901
      for idx in range(len(self.disks)):
902
        option = "disk%d_dump" % idx
903
        if export_info.has_option(constants.INISECT_INS, option):
904
          # FIXME: are the old os-es, disk sizes, etc. useful?
905
          export_name = export_info.get(constants.INISECT_INS, option)
906
          image = utils.PathJoin(self.op.src_path, export_name)
907
          disk_images.append(image)
908
        else:
909
          disk_images.append(False)
910

    
911
      self.src_images = disk_images
912

    
913
      if self.op.instance_name == self._old_instance_name:
914
        for idx, nic in enumerate(self.nics):
915
          if nic.mac == constants.VALUE_AUTO:
916
            nic_mac_ini = "nic%d_mac" % idx
917
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)
918

    
919
    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT
920

    
921
    # ip ping checks (we use the same ip that was resolved in ExpandNames)
922
    if self.op.ip_check:
923
      if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
924
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
925
                                   (self.check_ip, self.op.instance_name),
926
                                   errors.ECODE_NOTUNIQUE)
927

    
928
    #### mac address generation
929
    # By generating here the mac address both the allocator and the hooks get
930
    # the real final mac address rather than the 'auto' or 'generate' value.
931
    # There is a race condition between the generation and the instance object
932
    # creation, which means that we know the mac is valid now, but we're not
933
    # sure it will be when we actually add the instance. If things go bad
934
    # adding the instance will abort because of a duplicate mac, and the
935
    # creation job will fail.
936
    for nic in self.nics:
937
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
938
        nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId())
939

    
940
    #### allocator run
941

    
942
    if self.op.iallocator is not None:
943
      self._RunAllocator()
944

    
945
    # Release all unneeded node locks
946
    keep_locks = filter(None, [self.op.pnode_uuid, self.op.snode_uuid,
947
                               self.op.src_node_uuid])
948
    ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
949
    ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
950
    ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)
951

    
952
    assert (self.owned_locks(locking.LEVEL_NODE) ==
953
            self.owned_locks(locking.LEVEL_NODE_RES)), \
954
      "Node locks differ from node resource locks"
955

    
956
    #### node related checks
957

    
958
    # check primary node
959
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode_uuid)
960
    assert self.pnode is not None, \
961
      "Cannot retrieve locked node %s" % self.op.pnode_uuid
962
    if pnode.offline:
963
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
964
                                 pnode.name, errors.ECODE_STATE)
965
    if pnode.drained:
966
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
967
                                 pnode.name, errors.ECODE_STATE)
968
    if not pnode.vm_capable:
969
      raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
970
                                 " '%s'" % pnode.name, errors.ECODE_STATE)
971

    
972
    self.secondaries = []
973

    
974
    # Fill in any IPs from IP pools. This must happen here, because we need to
975
    # know the nic's primary node, as specified by the iallocator
976
    for idx, nic in enumerate(self.nics):
977
      net_uuid = nic.network
978
      if net_uuid is not None:
979
        nobj = self.cfg.GetNetwork(net_uuid)
980
        netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.uuid)
981
        if netparams is None:
982
          raise errors.OpPrereqError("No netparams found for network"
983
                                     " %s. Propably not connected to"
984
                                     " node's %s nodegroup" %
985
                                     (nobj.name, self.pnode.name),
986
                                     errors.ECODE_INVAL)
987
        self.LogInfo("NIC/%d inherits netparams %s" %
988
                     (idx, netparams.values()))
989
        nic.nicparams = dict(netparams)
990
        if nic.ip is not None:
991
          if nic.ip.lower() == constants.NIC_IP_POOL:
992
            try:
993
              nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId())
994
            except errors.ReservationError:
995
              raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
996
                                         " from the address pool" % idx,
997
                                         errors.ECODE_STATE)
998
            self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name)
999
          else:
1000
            try:
1001
              self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId())
1002
            except errors.ReservationError:
1003
              raise errors.OpPrereqError("IP address %s already in use"
1004
                                         " or does not belong to network %s" %
1005
                                         (nic.ip, nobj.name),
1006
                                         errors.ECODE_NOTUNIQUE)
1007

    
1008
      # net is None, ip None or given
1009
      elif self.op.conflicts_check:
1010
        _CheckForConflictingIp(self, nic.ip, self.pnode.uuid)
1011

    
1012
    # mirror node verification
1013
    if self.op.disk_template in constants.DTS_INT_MIRROR:
1014
      if self.op.snode_uuid == pnode.uuid:
1015
        raise errors.OpPrereqError("The secondary node cannot be the"
1016
                                   " primary node", errors.ECODE_INVAL)
1017
      CheckNodeOnline(self, self.op.snode_uuid)
1018
      CheckNodeNotDrained(self, self.op.snode_uuid)
1019
      CheckNodeVmCapable(self, self.op.snode_uuid)
1020
      self.secondaries.append(self.op.snode_uuid)
1021

    
1022
      snode = self.cfg.GetNodeInfo(self.op.snode_uuid)
1023
      if pnode.group != snode.group:
1024
        self.LogWarning("The primary and secondary nodes are in two"
1025
                        " different node groups; the disk parameters"
1026
                        " from the first disk's node group will be"
1027
                        " used")
1028

    
1029
    nodes = [pnode]
1030
    if self.op.disk_template in constants.DTS_INT_MIRROR:
1031
      nodes.append(snode)
1032
    has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
1033
    excl_stor = compat.any(map(has_es, nodes))
1034
    if excl_stor and not self.op.disk_template in constants.DTS_EXCL_STORAGE:
1035
      raise errors.OpPrereqError("Disk template %s not supported with"
1036
                                 " exclusive storage" % self.op.disk_template,
1037
                                 errors.ECODE_STATE)
1038
    for disk in self.disks:
1039
      CheckSpindlesExclusiveStorage(disk, excl_stor, True)
1040

    
1041
    node_uuids = [pnode.uuid] + self.secondaries
1042

    
1043
    if not self.adopt_disks:
1044
      if self.op.disk_template == constants.DT_RBD:
1045
        # _CheckRADOSFreeSpace() is just a placeholder.
1046
        # Any function that checks prerequisites can be placed here.
1047
        # Check if there is enough space on the RADOS cluster.
1048
        CheckRADOSFreeSpace()
1049
      elif self.op.disk_template == constants.DT_EXT:
1050
        # FIXME: Function that checks prereqs if needed
1051
        pass
1052
      elif self.op.disk_template in utils.GetLvmDiskTemplates():
1053
        # Check lv size requirements, if not adopting
1054
        req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
1055
        CheckNodesFreeDiskPerVG(self, node_uuids, req_sizes)
1056
      else:
1057
        # FIXME: add checks for other, non-adopting, non-lvm disk templates
1058
        pass
1059

    
1060
    elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
1061
      all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
1062
                                disk[constants.IDISK_ADOPT])
1063
                     for disk in self.disks])
1064
      if len(all_lvs) != len(self.disks):
1065
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
1066
                                   errors.ECODE_INVAL)
1067
      for lv_name in all_lvs:
1068
        try:
1069
          # FIXME: lv_name here is "vg/lv"; need to ensure that other calls
          # to ReserveLV use the same syntax
1071
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
1072
        except errors.ReservationError:
1073
          raise errors.OpPrereqError("LV named %s used by another instance" %
1074
                                     lv_name, errors.ECODE_NOTUNIQUE)
1075

    
1076
      vg_names = self.rpc.call_vg_list([pnode.uuid])[pnode.uuid]
1077
      vg_names.Raise("Cannot get VG information from node %s" % pnode.name)
1078

    
1079
      node_lvs = self.rpc.call_lv_list([pnode.uuid],
1080
                                       vg_names.payload.keys())[pnode.uuid]
1081
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
1082
      node_lvs = node_lvs.payload
1083

    
1084
      delta = all_lvs.difference(node_lvs.keys())
1085
      if delta:
1086
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
1087
                                   utils.CommaJoin(delta),
1088
                                   errors.ECODE_INVAL)
1089
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
1090
      if online_lvs:
1091
        raise errors.OpPrereqError("Online logical volumes found, cannot"
1092
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
1093
                                   errors.ECODE_STATE)
1094
      # update the size of disk based on what is found
1095
      for dsk in self.disks:
1096
        dsk[constants.IDISK_SIZE] = \
1097
          int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
1098
                                        dsk[constants.IDISK_ADOPT])][0]))
1099

    
1100
    elif self.op.disk_template == constants.DT_BLOCK:
1101
      # Normalize and de-duplicate device paths
1102
      all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
1103
                       for disk in self.disks])
1104
      if len(all_disks) != len(self.disks):
1105
        raise errors.OpPrereqError("Duplicate disk names given for adoption",
1106
                                   errors.ECODE_INVAL)
1107
      baddisks = [d for d in all_disks
1108
                  if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
1109
      if baddisks:
1110
        raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
1111
                                   " cannot be adopted" %
1112
                                   (utils.CommaJoin(baddisks),
1113
                                    constants.ADOPTABLE_BLOCKDEV_ROOT),
1114
                                   errors.ECODE_INVAL)
1115

    
1116
      node_disks = self.rpc.call_bdev_sizes([pnode.uuid],
1117
                                            list(all_disks))[pnode.uuid]
1118
      node_disks.Raise("Cannot get block device information from node %s" %
1119
                       pnode.name)
1120
      node_disks = node_disks.payload
1121
      delta = all_disks.difference(node_disks.keys())
1122
      if delta:
1123
        raise errors.OpPrereqError("Missing block device(s): %s" %
1124
                                   utils.CommaJoin(delta),
1125
                                   errors.ECODE_INVAL)
1126
      for dsk in self.disks:
1127
        dsk[constants.IDISK_SIZE] = \
1128
          int(float(node_disks[dsk[constants.IDISK_ADOPT]]))
1129

    
1130
    # Verify instance specs
1131
    spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
1132
    ispec = {
1133
      constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
1134
      constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
1135
      constants.ISPEC_DISK_COUNT: len(self.disks),
1136
      constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE]
1137
                                  for disk in self.disks],
1138
      constants.ISPEC_NIC_COUNT: len(self.nics),
1139
      constants.ISPEC_SPINDLE_USE: spindle_use,
1140
      }
1141

    
1142
    group_info = self.cfg.GetNodeGroup(pnode.group)
1143
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
1144
    res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
1145
                                               self.op.disk_template)
1146
    if not self.op.ignore_ipolicy and res:
1147
      msg = ("Instance allocation to group %s (%s) violates policy: %s" %
1148
             (pnode.group, group_info.name, utils.CommaJoin(res)))
1149
      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
1150

    
1151
    CheckHVParams(self, node_uuids, self.op.hypervisor, self.op.hvparams)
1152

    
1153
    CheckNodeHasOS(self, pnode.uuid, self.op.os_type, self.op.force_variant)
1154
    # check OS parameters (remotely)
1155
    CheckOSParams(self, True, node_uuids, self.op.os_type, self.os_full)
1156

    
1157
    CheckNicsBridgesExist(self, self.nics, self.pnode.uuid)
1158

    
1159
    #TODO: _CheckExtParams (remotely)
1160
    # Check parameters for extstorage
1161

    
1162
    # memory check on primary node
1163
    #TODO(dynmem): use MINMEM for checking
1164
    if self.op.start:
1165
      hvfull = objects.FillDict(cluster.hvparams.get(self.op.hypervisor, {}),
1166
                                self.op.hvparams)
1167
      CheckNodeFreeMemory(self, self.pnode.uuid,
1168
                          "creating instance %s" % self.op.instance_name,
1169
                          self.be_full[constants.BE_MAXMEM],
1170
                          self.op.hypervisor, hvfull)
1171

    
1172
    self.dry_run_result = list(node_uuids)
1173

    
1174
  def Exec(self, feedback_fn):
1175
    """Create and add the instance to the cluster.
1176

1177
    """
1178
    assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
1179
                self.owned_locks(locking.LEVEL_NODE)), \
1180
      "Node locks differ from node resource locks"
1181
    assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)
1182

    
1183
    ht_kind = self.op.hypervisor
1184
    if ht_kind in constants.HTS_REQ_PORT:
1185
      network_port = self.cfg.AllocatePort()
1186
    else:
1187
      network_port = None
1188

    
1189
    instance_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
1190

    
1191
    # This is ugly but we have a chicken-and-egg problem here
1192
    # We can only take the group disk parameters, as the instance
1193
    # has no disks yet (we are generating them right here).
1194
    nodegroup = self.cfg.GetNodeGroup(self.pnode.group)
1195
    disks = GenerateDiskTemplate(self,
1196
                                 self.op.disk_template,
1197
                                 instance_uuid, self.pnode.uuid,
1198
                                 self.secondaries,
1199
                                 self.disks,
1200
                                 self.instance_file_storage_dir,
1201
                                 self.op.file_driver,
1202
                                 0,
1203
                                 feedback_fn,
1204
                                 self.cfg.GetGroupDiskParams(nodegroup))
1205

    
1206
    iobj = objects.Instance(name=self.op.instance_name,
1207
                            uuid=instance_uuid,
1208
                            os=self.op.os_type,
1209
                            primary_node=self.pnode.uuid,
1210
                            nics=self.nics, disks=disks,
1211
                            disk_template=self.op.disk_template,
1212
                            disks_active=False,
1213
                            admin_state=constants.ADMINST_DOWN,
1214
                            network_port=network_port,
1215
                            beparams=self.op.beparams,
1216
                            hvparams=self.op.hvparams,
1217
                            hypervisor=self.op.hypervisor,
1218
                            osparams=self.op.osparams,
1219
                            )
1220

    
1221
    if self.op.tags:
1222
      for tag in self.op.tags:
1223
        iobj.AddTag(tag)
1224

    
1225
    if self.adopt_disks:
1226
      if self.op.disk_template == constants.DT_PLAIN:
1227
        # rename LVs to the newly-generated names; we need to construct
1228
        # 'fake' LV disks with the old data, plus the new unique_id
1229
        tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
1230
        rename_to = []
1231
        for t_dsk, a_dsk in zip(tmp_disks, self.disks):
1232
          rename_to.append(t_dsk.logical_id)
1233
          t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
1234
          self.cfg.SetDiskID(t_dsk, self.pnode.uuid)
1235
        result = self.rpc.call_blockdev_rename(self.pnode.uuid,
1236
                                               zip(tmp_disks, rename_to))
1237
        result.Raise("Failed to rename adoped LVs")
1238
    else:
1239
      feedback_fn("* creating instance disks...")
1240
      try:
1241
        CreateDisks(self, iobj)
1242
      except errors.OpExecError:
1243
        self.LogWarning("Device creation failed")
1244
        self.cfg.ReleaseDRBDMinors(self.op.instance_name)
1245
        raise
1246

    
1247
    feedback_fn("adding instance %s to cluster config" % self.op.instance_name)
1248

    
1249
    self.cfg.AddInstance(iobj, self.proc.GetECId())
1250

    
1251
    # Declare that we don't want to remove the instance lock anymore, as we've
1252
    # added the instance to the config
1253
    del self.remove_locks[locking.LEVEL_INSTANCE]
1254

    
1255
    if self.op.mode == constants.INSTANCE_IMPORT:
1256
      # Release unused nodes
1257
      ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node_uuid])
1258
    else:
1259
      # Release all nodes
1260
      ReleaseLocks(self, locking.LEVEL_NODE)
1261

    
1262
    disk_abort = False
1263
    if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
1264
      feedback_fn("* wiping instance disks...")
1265
      try:
1266
        WipeDisks(self, iobj)
1267
      except errors.OpExecError, err:
1268
        logging.exception("Wiping disks failed")
1269
        self.LogWarning("Wiping instance disks failed (%s)", err)
1270
        disk_abort = True
1271

    
1272
    if disk_abort:
1273
      # Something is already wrong with the disks, don't do anything else
1274
      pass
1275
    elif self.op.wait_for_sync:
1276
      disk_abort = not WaitForSync(self, iobj)
1277
    elif iobj.disk_template in constants.DTS_INT_MIRROR:
1278
      # make sure the disks are not degraded (still sync-ing is ok)
1279
      feedback_fn("* checking mirrors status")
1280
      disk_abort = not WaitForSync(self, iobj, oneshot=True)
1281
    else:
1282
      disk_abort = False
1283

    
1284
    if disk_abort:
1285
      RemoveDisks(self, iobj)
1286
      self.cfg.RemoveInstance(iobj.uuid)
1287
      # Make sure the instance lock gets removed
1288
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
1289
      raise errors.OpExecError("There are some degraded disks for"
1290
                               " this instance")
1291

    
1292
    # instance disks are now active
1293
    iobj.disks_active = True
1294

    
1295
    # Release all node resource locks
1296
    ReleaseLocks(self, locking.LEVEL_NODE_RES)
1297

    
1298
    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
1299
      # we need to set the disks ID to the primary node, since the
1300
      # preceding code might or might have not done it, depending on
1301
      # disk template and other options
1302
      for disk in iobj.disks:
1303
        self.cfg.SetDiskID(disk, self.pnode.uuid)
1304
      if self.op.mode == constants.INSTANCE_CREATE:
1305
        if not self.op.no_install:
1306
          pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
1307
                        not self.op.wait_for_sync)
1308
          if pause_sync:
1309
            feedback_fn("* pausing disk sync to install instance OS")
1310
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
1311
                                                              (iobj.disks,
1312
                                                               iobj), True)
1313
            for idx, success in enumerate(result.payload):
1314
              if not success:
1315
                logging.warn("pause-sync of instance %s for disk %d failed",
1316
                             self.op.instance_name, idx)
1317

    
1318
          feedback_fn("* running the instance OS create scripts...")
1319
          # FIXME: pass debug option from opcode to backend
1320
          os_add_result = \
1321
            self.rpc.call_instance_os_add(self.pnode.uuid, (iobj, None), False,
1322
                                          self.op.debug_level)
1323
          if pause_sync:
1324
            feedback_fn("* resuming disk sync")
1325
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
1326
                                                              (iobj.disks,
1327
                                                               iobj), False)
1328
            for idx, success in enumerate(result.payload):
1329
              if not success:
1330
                logging.warn("resume-sync of instance %s for disk %d failed",
1331
                             self.op.instance_name, idx)
1332

    
1333
          os_add_result.Raise("Could not add os for instance %s"
1334
                              " on node %s" % (self.op.instance_name,
1335
                                               self.pnode.name))
1336

    
1337
      else:
1338
        if self.op.mode == constants.INSTANCE_IMPORT:
1339
          feedback_fn("* running the instance OS import scripts...")
1340

    
1341
          transfers = []
1342

    
1343
          for idx, image in enumerate(self.src_images):
1344
            if not image:
1345
              continue
1346

    
1347
            # FIXME: pass debug option from opcode to backend
1348
            dt = masterd.instance.DiskTransfer("disk/%s" % idx,
1349
                                               constants.IEIO_FILE, (image, ),
1350
                                               constants.IEIO_SCRIPT,
1351
                                               (iobj.disks[idx], idx),
1352
                                               None)
1353
            transfers.append(dt)
1354

    
1355
          import_result = \
1356
            masterd.instance.TransferInstanceData(self, feedback_fn,
1357
                                                  self.op.src_node_uuid,
1358
                                                  self.pnode.uuid,
1359
                                                  self.pnode.secondary_ip,
1360
                                                  iobj, transfers)
1361
          if not compat.all(import_result):
1362
            self.LogWarning("Some disks for instance %s on node %s were not"
1363
                            " imported successfully" % (self.op.instance_name,
1364
                                                        self.pnode.name))
1365

    
1366
          rename_from = self._old_instance_name
1367

    
1368
        elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
1369
          feedback_fn("* preparing remote import...")
1370
          # The source cluster will stop the instance before attempting to make
1371
          # a connection. In some cases stopping an instance can take a long
1372
          # time, hence the shutdown timeout is added to the connection
1373
          # timeout.
1374
          connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
1375
                             self.op.source_shutdown_timeout)
1376
          timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
1377

    
1378
          assert iobj.primary_node == self.pnode.uuid
1379
          disk_results = \
1380
            masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
1381
                                          self.source_x509_ca,
1382
                                          self._cds, timeouts)
1383
          if not compat.all(disk_results):
1384
            # TODO: Should the instance still be started, even if some disks
1385
            # failed to import (valid for local imports, too)?
1386
            self.LogWarning("Some disks for instance %s on node %s were not"
1387
                            " imported successfully" % (self.op.instance_name,
1388
                                                        self.pnode.name))
1389

    
1390
          rename_from = self.source_instance_name
1391

    
1392
        else:
1393
          # also checked in the prereq part
1394
          raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
1395
                                       % self.op.mode)
1396

    
1397
        # Run rename script on newly imported instance
1398
        assert iobj.name == self.op.instance_name
1399
        feedback_fn("Running rename script for %s" % self.op.instance_name)
1400
        result = self.rpc.call_instance_run_rename(self.pnode.uuid, iobj,
1401
                                                   rename_from,
1402
                                                   self.op.debug_level)
1403
        result.Warn("Failed to run rename script for %s on node %s" %
1404
                    (self.op.instance_name, self.pnode.name), self.LogWarning)
1405

    
1406
    assert not self.owned_locks(locking.LEVEL_NODE_RES)
1407

    
1408
    if self.op.start:
1409
      iobj.admin_state = constants.ADMINST_UP
1410
      self.cfg.Update(iobj, feedback_fn)
1411
      logging.info("Starting instance %s on node %s", self.op.instance_name,
1412
                   self.pnode.name)
1413
      feedback_fn("* starting instance...")
1414
      result = self.rpc.call_instance_start(self.pnode.uuid, (iobj, None, None),
1415
                                            False, self.op.reason)
1416
      result.Raise("Could not start instance")
1417

    
1418
    return list(iobj.all_nodes)
1419

    
1420

    
1421
class LUInstanceRename(LogicalUnit):
  """Rename an instance.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE

  def CheckArguments(self):
    """Check arguments.

    """
    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("IP address check requires a name check",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = BuildInstanceHookEnvByObject(self, self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    (self.op.instance_uuid, self.op.instance_name) = \
      ExpandInstanceUuidAndName(self.cfg, self.op.instance_uuid,
                                self.op.instance_name)
    instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    assert instance is not None

    # It should actually not happen that an instance is running with a disabled
    # disk template, but in case it does, the renaming of file-based instances
    # will fail horribly. Thus, we test it before.
    if (instance.disk_template in constants.DTS_FILEBASED and
        self.op.new_name != instance.name):
      CheckDiskTemplateEnabled(self.cfg.GetClusterInfo(),
                               instance.disk_template)

    CheckNodeOnline(self, instance.primary_node)
    CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
                       msg="cannot rename")
    self.instance = instance

    new_name = self.op.new_name
    if self.op.name_check:
      hostname = _CheckHostnameSane(self, new_name)
      new_name = self.op.new_name = hostname.name
      if (self.op.ip_check and
          netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (hostname.ip, new_name),
                                   errors.ECODE_NOTUNIQUE)

    instance_names = [inst.name for
                      inst in self.cfg.GetAllInstancesInfo().values()]
    if new_name in instance_names and new_name != instance.name:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 new_name, errors.ECODE_EXISTS)

  def Exec(self, feedback_fn):
    """Rename the instance.

    """
    old_name = self.instance.name

    rename_file_storage = False
    if (self.instance.disk_template in constants.DTS_FILEBASED and
        self.op.new_name != self.instance.name):
      old_file_storage_dir = os.path.dirname(
                               self.instance.disks[0].logical_id[1])
      rename_file_storage = True

    self.cfg.RenameInstance(self.instance.uuid, self.op.new_name)
    # Change the instance lock. This is definitely safe while we hold the BGL.
    # Otherwise the new lock would have to be added in acquired mode.
    assert self.REQ_BGL
    assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER)
    self.glm.remove(locking.LEVEL_INSTANCE, old_name)
    self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)

    # re-read the instance from the configuration after rename
    renamed_inst = self.cfg.GetInstanceInfo(self.instance.uuid)

    if rename_file_storage:
      new_file_storage_dir = os.path.dirname(
                               renamed_inst.disks[0].logical_id[1])
      result = self.rpc.call_file_storage_dir_rename(renamed_inst.primary_node,
                                                     old_file_storage_dir,
                                                     new_file_storage_dir)
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
                   " (but the instance has been renamed in Ganeti)" %
                   (self.cfg.GetNodeName(renamed_inst.primary_node),
                    old_file_storage_dir, new_file_storage_dir))

    StartInstanceDisks(self, renamed_inst, None)
    # update info on disks
    info = GetInstanceInfoText(renamed_inst)
    for (idx, disk) in enumerate(renamed_inst.disks):
      for node_uuid in renamed_inst.all_nodes:
        self.cfg.SetDiskID(disk, node_uuid)
        result = self.rpc.call_blockdev_setinfo(node_uuid, disk, info)
        result.Warn("Error setting info on node %s for disk %s" %
                    (self.cfg.GetNodeName(node_uuid), idx), self.LogWarning)
    try:
      result = self.rpc.call_instance_run_rename(renamed_inst.primary_node,
                                                 renamed_inst, old_name,
                                                 self.op.debug_level)
      result.Warn("Could not run OS rename script for instance %s on node %s"
                  " (but the instance has been renamed in Ganeti)" %
                  (renamed_inst.name,
                   self.cfg.GetNodeName(renamed_inst.primary_node)),
                  self.LogWarning)
    finally:
      ShutdownInstanceDisks(self, renamed_inst)

    return renamed_inst.name


class LUInstanceRemove(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()
    elif level == locking.LEVEL_NODE_RES:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = BuildInstanceHookEnvByObject(self, self.instance)
    env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()]
    nl_post = list(self.instance.all_nodes) + nl
    return (nl, nl_post)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Remove the instance.

    """
    logging.info("Shutting down instance %s on node %s", self.instance.name,
                 self.cfg.GetNodeName(self.instance.primary_node))

    result = self.rpc.call_instance_shutdown(self.instance.primary_node,
                                             self.instance,
                                             self.op.shutdown_timeout,
                                             self.op.reason)
    if self.op.ignore_failures:
      result.Warn("Warning: can't shutdown instance", feedback_fn)
    else:
      result.Raise("Could not shutdown instance %s on node %s" %
                   (self.instance.name,
                    self.cfg.GetNodeName(self.instance.primary_node)))

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES))
    assert not (set(self.instance.all_nodes) -
                self.owned_locks(locking.LEVEL_NODE)), \
      "Not owning correct locks"

    RemoveInstance(self, feedback_fn, self.instance, self.op.ignore_failures)


class LUInstanceMove(LogicalUnit):
  """Move an instance by data-copying.

  """
  HPATH = "instance-move"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    (self.op.target_node_uuid, self.op.target_node) = \
      ExpandNodeUuidAndName(self.cfg, self.op.target_node_uuid,
                            self.op.target_node)
    self.needed_locks[locking.LEVEL_NODE] = [self.op.target_node_uuid]
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes(primary_only=True)
    elif level == locking.LEVEL_NODE_RES:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "TARGET_NODE": self.op.target_node,
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
      }
    env.update(BuildInstanceHookEnvByObject(self, self.instance))
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [
      self.cfg.GetMasterNode(),
      self.instance.primary_node,
      self.op.target_node_uuid,
      ]
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    if self.instance.disk_template not in constants.DTS_COPYABLE:
      raise errors.OpPrereqError("Disk template %s not suitable for copying" %
                                 self.instance.disk_template,
                                 errors.ECODE_STATE)

    target_node = self.cfg.GetNodeInfo(self.op.target_node_uuid)
    assert target_node is not None, \
      "Cannot retrieve locked node %s" % self.op.target_node

    self.target_node_uuid = target_node.uuid
    if target_node.uuid == self.instance.primary_node:
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
                                 (self.instance.name, target_node.name),
                                 errors.ECODE_STATE)

    bep = self.cfg.GetClusterInfo().FillBE(self.instance)

    for idx, dsk in enumerate(self.instance.disks):
      if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
                                   " cannot copy" % idx, errors.ECODE_STATE)

    CheckNodeOnline(self, target_node.uuid)
    CheckNodeNotDrained(self, target_node.uuid)
    CheckNodeVmCapable(self, target_node.uuid)
    cluster = self.cfg.GetClusterInfo()
    group_info = self.cfg.GetNodeGroup(target_node.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
    CheckTargetNodeIPolicy(self, ipolicy, self.instance, target_node, self.cfg,
                           ignore=self.op.ignore_ipolicy)

    if self.instance.admin_state == constants.ADMINST_UP:
      # check memory requirements on the secondary node
      CheckNodeFreeMemory(
          self, target_node.uuid, "failing over instance %s" %
          self.instance.name, bep[constants.BE_MAXMEM],
          self.instance.hypervisor,
          self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])
    else:
      self.LogInfo("Not checking memory on the secondary node as"
                   " instance will not be started")

    # check bridge existence
    CheckInstanceBridgesExist(self, self.instance, node_uuid=target_node.uuid)

  def Exec(self, feedback_fn):
    """Move an instance.

    The move is done by shutting it down on its present node, copying
    the data over (slow) and starting it on the new node.

    """
    source_node = self.cfg.GetNodeInfo(self.instance.primary_node)
    target_node = self.cfg.GetNodeInfo(self.target_node_uuid)

    self.LogInfo("Shutting down instance %s on source node %s",
                 self.instance.name, source_node.name)

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES))

    result = self.rpc.call_instance_shutdown(source_node.uuid, self.instance,
                                             self.op.shutdown_timeout,
                                             self.op.reason)
    if self.op.ignore_consistency:
      result.Warn("Could not shutdown instance %s on node %s. Proceeding"
                  " anyway. Please make sure node %s is down. Error details" %
                  (self.instance.name, source_node.name, source_node.name),
                  self.LogWarning)
    else:
      result.Raise("Could not shutdown instance %s on node %s" %
                   (self.instance.name, source_node.name))

    # create the target disks
    try:
      CreateDisks(self, self.instance, target_node_uuid=target_node.uuid)
    except errors.OpExecError:
      self.LogWarning("Device creation failed")
      self.cfg.ReleaseDRBDMinors(self.instance.uuid)
      raise

    cluster_name = self.cfg.GetClusterInfo().cluster_name

    errs = []
    # activate, get path, copy the data over
    for idx, disk in enumerate(self.instance.disks):
      self.LogInfo("Copying data for disk %d", idx)
      result = self.rpc.call_blockdev_assemble(
                 target_node.uuid, (disk, self.instance), self.instance.name,
                 True, idx)
      if result.fail_msg:
        self.LogWarning("Can't assemble newly created disk %d: %s",
                        idx, result.fail_msg)
        errs.append(result.fail_msg)
        break
      dev_path = result.payload
      result = self.rpc.call_blockdev_export(source_node.uuid, (disk,
                                                                self.instance),
                                             target_node.name, dev_path,
                                             cluster_name)
      if result.fail_msg:
        self.LogWarning("Can't copy data over for disk %d: %s",
                        idx, result.fail_msg)
        errs.append(result.fail_msg)
        break

    if errs:
      self.LogWarning("Some disks failed to copy, aborting")
      try:
        RemoveDisks(self, self.instance, target_node_uuid=target_node.uuid)
      finally:
        self.cfg.ReleaseDRBDMinors(self.instance.uuid)
        raise errors.OpExecError("Errors during disk copy: %s" %
                                 (",".join(errs),))

    self.instance.primary_node = target_node.uuid
    self.cfg.Update(self.instance, feedback_fn)

    self.LogInfo("Removing the disks on the original node")
    RemoveDisks(self, self.instance, target_node_uuid=source_node.uuid)

    # Only start the instance if it's marked as up
    if self.instance.admin_state == constants.ADMINST_UP:
      self.LogInfo("Starting instance %s on node %s",
                   self.instance.name, target_node.name)

      disks_ok, _ = AssembleInstanceDisks(self, self.instance,
                                          ignore_secondaries=True)
      if not disks_ok:
        ShutdownInstanceDisks(self, self.instance)
        raise errors.OpExecError("Can't activate the instance's disks")

      result = self.rpc.call_instance_start(target_node.uuid,
                                            (self.instance, None, None), False,
                                            self.op.reason)
      msg = result.fail_msg
      if msg:
        ShutdownInstanceDisks(self, self.instance)
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
                                 (self.instance.name, target_node.name, msg))


class LUInstanceMultiAlloc(NoHooksLU):
  """Allocates multiple instances at the same time.

  """
  REQ_BGL = False

  def CheckArguments(self):
    """Check arguments.

    """
    nodes = []
    for inst in self.op.instances:
      if inst.iallocator is not None:
        raise errors.OpPrereqError("iallocator is not allowed to be set on"
                                   " instance objects", errors.ECODE_INVAL)
      nodes.append(bool(inst.pnode))
      if inst.disk_template in constants.DTS_INT_MIRROR:
        nodes.append(bool(inst.snode))

    has_nodes = compat.any(nodes)
    if compat.all(nodes) ^ has_nodes:
      raise errors.OpPrereqError("There are instance objects providing"
                                 " pnode/snode while others do not",
                                 errors.ECODE_INVAL)

    if not has_nodes and self.op.iallocator is None:
      default_iallocator = self.cfg.GetDefaultIAllocator()
      if default_iallocator:
        self.op.iallocator = default_iallocator
      else:
        raise errors.OpPrereqError("No iallocator or nodes on the instances"
                                   " given and no cluster-wide default"
                                   " iallocator found; please specify either"
                                   " an iallocator or nodes on the instances"
                                   " or set a cluster-wide default iallocator",
                                   errors.ECODE_INVAL)

    _CheckOpportunisticLocking(self.op)

    dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
    if dups:
      raise errors.OpPrereqError("There are duplicate instance names: %s" %
                                 utils.CommaJoin(dups), errors.ECODE_INVAL)

  def ExpandNames(self):
    """Calculate the locks.

    """
    self.share_locks = ShareAll()
    self.needed_locks = {
      # iallocator will select nodes and even if no iallocator is used,
      # collisions with LUInstanceCreate should be avoided
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
      }

    if self.op.iallocator:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
      self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET

      if self.op.opportunistic_locking:
        self.opportunistic_locks[locking.LEVEL_NODE] = True
        self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
    else:
      nodeslist = []
      for inst in self.op.instances:
        (inst.pnode_uuid, inst.pnode) = \
          ExpandNodeUuidAndName(self.cfg, inst.pnode_uuid, inst.pnode)
        nodeslist.append(inst.pnode_uuid)
        if inst.snode is not None:
          (inst.snode_uuid, inst.snode) = \
            ExpandNodeUuidAndName(self.cfg, inst.snode_uuid, inst.snode)
          nodeslist.append(inst.snode_uuid)

      self.needed_locks[locking.LEVEL_NODE] = nodeslist
      # Lock resources of instance's primary and secondary nodes (copy to
      # prevent accidental modification)
      self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)

  def CheckPrereq(self):
    """Check prerequisite.

    """
    if self.op.iallocator:
      cluster = self.cfg.GetClusterInfo()
      default_vg = self.cfg.GetVGName()
      ec_id = self.proc.GetECId()

      if self.op.opportunistic_locking:
        # Only consider nodes for which a lock is held
        node_whitelist = self.cfg.GetNodeNames(
                           list(self.owned_locks(locking.LEVEL_NODE)))
      else:
        node_whitelist = None

      insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
                                           _ComputeNics(op, cluster, None,
                                                        self.cfg, ec_id),
                                           _ComputeFullBeParams(op, cluster),
                                           node_whitelist)
               for op in self.op.instances]

      req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
      ial = iallocator.IAllocator(self.cfg, self.rpc, req)

      ial.Run(self.op.iallocator)

      if not ial.success:
        raise errors.OpPrereqError("Can't compute nodes using"
                                   " iallocator '%s': %s" %
                                   (self.op.iallocator, ial.info),
                                   errors.ECODE_NORES)

      self.ia_result = ial.result

    if self.op.dry_run:
      self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
        constants.JOB_IDS_KEY: [],
        })

  def _ConstructPartialResult(self):
    """Constructs the partial result.

    """
    if self.op.iallocator:
      (allocatable, failed_insts) = self.ia_result
      allocatable_insts = map(compat.fst, allocatable)
    else:
      allocatable_insts = [op.instance_name for op in self.op.instances]
      failed_insts = []

    return {
      opcodes.OpInstanceMultiAlloc.ALLOCATABLE_KEY: allocatable_insts,
      opcodes.OpInstanceMultiAlloc.FAILED_KEY: failed_insts,
      }

  def Exec(self, feedback_fn):
    """Executes the opcode.

    """
    jobs = []
    if self.op.iallocator:
      op2inst = dict((op.instance_name, op) for op in self.op.instances)
      (allocatable, failed) = self.ia_result

      for (name, node_names) in allocatable:
        op = op2inst.pop(name)

        (op.pnode_uuid, op.pnode) = \
          ExpandNodeUuidAndName(self.cfg, None, node_names[0])
        if len(node_names) > 1:
          (op.snode_uuid, op.snode) = \
            ExpandNodeUuidAndName(self.cfg, None, node_names[1])

        jobs.append([op])

      missing = set(op2inst.keys()) - set(failed)
      assert not missing, \
        "Iallocator returned an incomplete result: %s" % \
        utils.CommaJoin(missing)
    else:
      jobs.extend([op] for op in self.op.instances)

    return ResultWithJobs(jobs, **self._ConstructPartialResult())


class _InstNicModPrivate:
  """Data structure for network interface modifications.

  Used by L{LUInstanceSetParams}.

  """
  def __init__(self):
    self.params = None
    self.filled = None


def _PrepareContainerMods(mods, private_fn):
  """Prepares a list of container modifications by adding a private data field.

  @type mods: list of tuples; (operation, index, parameters)
  @param mods: List of modifications
  @type private_fn: callable or None
  @param private_fn: Callable for constructing a private data field for a
    modification
  @rtype: list

  """
  if private_fn is None:
    fn = lambda: None
  else:
    fn = private_fn

  return [(op, idx, params, fn()) for (op, idx, params) in mods]


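# Illustrative sketch (not part of the upstream module): how a modification
# list is typically prepared before being applied. The parameter values are
# hypothetical; _InstNicModPrivate is used here only as an example factory.
#
#   mods = [(constants.DDM_ADD, -1, {constants.IDISK_SIZE: 1024}),
#           (constants.DDM_REMOVE, 0, {})]
#   prepared = _PrepareContainerMods(mods, _InstNicModPrivate)
#   # each entry is now a 4-tuple: (op, identifier, params, private_object)
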
def _CheckNodesPhysicalCPUs(lu, node_uuids, requested, hypervisor_specs):
  """Checks if nodes have enough physical CPUs.

  This function checks if all given nodes have the needed number of
  physical CPUs. In case any node has fewer CPUs or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type node_uuids: C{list}
  @param node_uuids: the list of node UUIDs to check
  @type requested: C{int}
  @param requested: the minimum acceptable number of physical CPUs
  @type hypervisor_specs: list of pairs (string, dict of strings)
  @param hypervisor_specs: list of hypervisor specifications in
      pairs (hypervisor_name, hvparams)
  @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
      or we cannot check the node

  """
  nodeinfo = lu.rpc.call_node_info(node_uuids, None, hypervisor_specs)
  for node_uuid in node_uuids:
    info = nodeinfo[node_uuid]
    node_name = lu.cfg.GetNodeName(node_uuid)
    info.Raise("Cannot get current information from node %s" % node_name,
               prereq=True, ecode=errors.ECODE_ENVIRON)
    (_, _, (hv_info, )) = info.payload
    num_cpus = hv_info.get("cpu_total", None)
    if not isinstance(num_cpus, int):
      raise errors.OpPrereqError("Can't compute the number of physical CPUs"
                                 " on node %s, result was '%s'" %
                                 (node_name, num_cpus), errors.ECODE_ENVIRON)
    if requested > num_cpus:
      raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
                                 "required" % (node_name, num_cpus, requested),
                                 errors.ECODE_NORES)


def GetItemFromContainer(identifier, kind, container):
  """Return the item referred to by the identifier.

  @type identifier: string
  @param identifier: Item index or name or UUID
  @type kind: string
  @param kind: One-word item description
  @type container: list
  @param container: Container to get the item from

  """
  # Index
  try:
    idx = int(identifier)
    if idx == -1:
      # Append
      absidx = len(container) - 1
    elif idx < 0:
      raise IndexError("Not accepting negative indices other than -1")
    elif idx > len(container):
      raise IndexError("Got %s index %s, but there are only %s" %
                       (kind, idx, len(container)))
    else:
      absidx = idx
    return (absidx, container[idx])
  except ValueError:
    pass

  for idx, item in enumerate(container):
    if item.uuid == identifier or item.name == identifier:
      return (idx, item)

  raise errors.OpPrereqError("Cannot find %s with identifier %s" %
                             (kind, identifier), errors.ECODE_NOENT)


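# Illustrative sketch (not part of the upstream module): the identifier may be
# a numeric index given as a string, -1 for the last item, or an item name or
# UUID. The disk container below is a hypothetical instance disk list.
#
#   GetItemFromContainer("0", "disk", instance.disks)    # -> (0, first disk)
#   GetItemFromContainer("-1", "disk", instance.disks)   # -> last disk
#   GetItemFromContainer("data-disk", "disk", instance.disks)  # by name/UUID
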
def _ApplyContainerMods(kind, container, chgdesc, mods,
                        create_fn, modify_fn, remove_fn):
  """Applies descriptions in C{mods} to C{container}.

  @type kind: string
  @param kind: One-word item description
  @type container: list
  @param container: Container to modify
  @type chgdesc: None or list
  @param chgdesc: List of applied changes
  @type mods: list
  @param mods: Modifications as returned by L{_PrepareContainerMods}
  @type create_fn: callable
  @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
    receives absolute item index, parameters and private data object as added
    by L{_PrepareContainerMods}, returns tuple containing new item and changes
    as list
  @type modify_fn: callable
  @param modify_fn: Callback for modifying an existing item
    (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
    and private data object as added by L{_PrepareContainerMods}, returns
    changes as list
  @type remove_fn: callable
  @param remove_fn: Callback on removing item; receives absolute item index,
    item and private data object as added by L{_PrepareContainerMods}

  """
  for (op, identifier, params, private) in mods:
    changes = None

    if op == constants.DDM_ADD:
      # Calculate where item will be added
      # When adding an item, identifier can only be an index
      try:
        idx = int(identifier)
      except ValueError:
        raise errors.OpPrereqError("Only a positive integer or -1 is accepted"
                                   " as identifier for %s" % constants.DDM_ADD,
                                   errors.ECODE_INVAL)
      if idx == -1:
        addidx = len(container)
      else:
        if idx < 0:
          raise IndexError("Not accepting negative indices other than -1")
        elif idx > len(container):
          raise IndexError("Got %s index %s, but there are only %s" %
                           (kind, idx, len(container)))
        addidx = idx

      if create_fn is None:
        item = params
      else:
        (item, changes) = create_fn(addidx, params, private)

      if idx == -1:
        container.append(item)
      else:
        assert idx >= 0
        assert idx <= len(container)
        # list.insert does so before the specified index
        container.insert(idx, item)
    else:
      # Retrieve existing item
      (absidx, item) = GetItemFromContainer(identifier, kind, container)

      if op == constants.DDM_REMOVE:
        assert not params

        if remove_fn is not None:
          remove_fn(absidx, item, private)

        changes = [("%s/%s" % (kind, absidx), "remove")]

        assert container[absidx] == item
        del container[absidx]
      elif op == constants.DDM_MODIFY:
        if modify_fn is not None:
          changes = modify_fn(absidx, item, params, private)
      else:
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)

    assert _TApplyContModsCbChanges(changes)

    if not (chgdesc is None or changes is None):
      chgdesc.extend(changes)


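# Illustrative sketch (not part of the upstream module): applying a prepared
# modification list to a NIC container. The container and the modify_fn
# callback below are hypothetical stand-ins for what LUInstanceSetParams
# supplies at runtime.
#
#   chgdesc = []
#   mods = _PrepareContainerMods([(constants.DDM_MODIFY, "0",
#                                  {constants.INIC_NAME: "eth-new"})],
#                                _InstNicModPrivate)
#   _ApplyContainerMods("NIC", instance.nics, chgdesc, mods,
#                       None, modify_fn, None)
#   # chgdesc now contains ("NIC/0", ...) entries describing the change
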
def _UpdateIvNames(base_index, disks):
  """Updates the C{iv_name} attribute of disks.

  @type disks: list of L{objects.Disk}

  """
  for (idx, disk) in enumerate(disks):
    disk.iv_name = "disk/%s" % (base_index + idx, )


class LUInstanceSetParams(LogicalUnit):
  """Modifies an instance's parameters.

  """
  HPATH = "instance-modify"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  @staticmethod
  def _UpgradeDiskNicMods(kind, mods, verify_fn):
    assert ht.TList(mods)
    assert not mods or len(mods[0]) in (2, 3)

    if mods and len(mods[0]) == 2:
      result = []

      addremove = 0
      for op, params in mods:
        if op in (constants.DDM_ADD, constants.DDM_REMOVE):
          result.append((op, -1, params))
          addremove += 1

          if addremove > 1:
            raise errors.OpPrereqError("Only one %s add or remove operation is"
                                       " supported at a time" % kind,
                                       errors.ECODE_INVAL)
        else:
          result.append((constants.DDM_MODIFY, op, params))

      assert verify_fn(result)
    else:
      result = mods

    return result

  @staticmethod
  def _CheckMods(kind, mods, key_types, item_fn):
    """Ensures requested disk/NIC modifications are valid.

    """
    for (op, _, params) in mods:
      assert ht.TDict(params)

      # If 'key_types' is an empty dict, we assume we have an
      # 'ext' template and thus do not ForceDictType
      if key_types:
        utils.ForceDictType(params, key_types)

      if op == constants.DDM_REMOVE:
        if params:
          raise errors.OpPrereqError("No settings should be passed when"
                                     " removing a %s" % kind,
                                     errors.ECODE_INVAL)
      elif op in (constants.DDM_ADD, constants.DDM_MODIFY):
        item_fn(op, params)
      else:
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)

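  # Illustrative sketch (not part of the upstream module): the legacy 2-tuple
  # mod format is upgraded to the 3-tuple (op, identifier, params) format.
  # The sample values and the verify_fn placeholder are hypothetical.
  #
  #   _UpgradeDiskNicMods("NIC", [(constants.DDM_ADD, {"link": "br0"})],
  #                       verify_fn)
  #   # -> [(constants.DDM_ADD, -1, {"link": "br0"})]
  #   _UpgradeDiskNicMods("NIC", [(0, {"link": "br1"})], verify_fn)
  #   # -> [(constants.DDM_MODIFY, 0, {"link": "br1"})]
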
  @staticmethod
  def _VerifyDiskModification(op, params, excl_stor):
    """Verifies a disk modification.

    """
    if op == constants.DDM_ADD:
      mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
      if mode not in constants.DISK_ACCESS_SET:
        raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
                                   errors.ECODE_INVAL)

      size = params.get(constants.IDISK_SIZE, None)
      if size is None:
        raise errors.OpPrereqError("Required disk parameter '%s' missing" %
                                   constants.IDISK_SIZE, errors.ECODE_INVAL)

      try:
        size = int(size)
      except (TypeError, ValueError), err:
        raise errors.OpPrereqError("Invalid disk size parameter: %s" % err,
                                   errors.ECODE_INVAL)

      params[constants.IDISK_SIZE] = size
      name = params.get(constants.IDISK_NAME, None)
      if name is not None and name.lower() == constants.VALUE_NONE:
        params[constants.IDISK_NAME] = None

      CheckSpindlesExclusiveStorage(params, excl_stor, True)

    elif op == constants.DDM_MODIFY:
      if constants.IDISK_SIZE in params:
        raise errors.OpPrereqError("Disk size change not possible, use"
                                   " grow-disk", errors.ECODE_INVAL)
      if len(params) > 2:
        raise errors.OpPrereqError("Disk modification doesn't support"
                                   " additional arbitrary parameters",
                                   errors.ECODE_INVAL)
      name = params.get(constants.IDISK_NAME, None)
      if name is not None and name.lower() == constants.VALUE_NONE:
        params[constants.IDISK_NAME] = None

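  # Illustrative sketch (not part of the upstream module): a disk-add request
  # that passes the checks above. Values are hypothetical; the size must be
  # an integer (MiB) and the mode must be in constants.DISK_ACCESS_SET.
  #
  #   params = {constants.IDISK_SIZE: 10240,
  #             constants.IDISK_MODE: constants.DISK_RDWR}
  #   LUInstanceSetParams._VerifyDiskModification(constants.DDM_ADD,
  #                                               params, False)
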
  @staticmethod
  def _VerifyNicModification(op, params):
    """Verifies a network interface modification.

    """
    if op in (constants.DDM_ADD, constants.DDM_MODIFY):
      ip = params.get(constants.INIC_IP, None)
      name = params.get(constants.INIC_NAME, None)
      req_net = params.get(constants.INIC_NETWORK, None)
      link = params.get(constants.NIC_LINK, None)
      mode = params.get(constants.NIC_MODE, None)
      if name is not None and name.lower() == constants.VALUE_NONE:
        params[constants.INIC_NAME] = None
      if req_net is not None:
        if req_net.lower() == constants.VALUE_NONE:
          params[constants.INIC_NETWORK] = None
          req_net = None
        elif link is not None or mode is not None:
          raise errors.OpPrereqError("If a network is given, link or mode"
                                     " should not be set",
                                     errors.ECODE_INVAL)

      if op == constants.DDM_ADD:
        macaddr = params.get(constants.INIC_MAC, None)
        if macaddr is None:
          params[constants.INIC_MAC] = constants.VALUE_AUTO

      if ip is not None:
        if ip.lower() == constants.VALUE_NONE:
          params[constants.INIC_IP] = None
        else:
          if ip.lower() == constants.NIC_IP_POOL:
            if op == constants.DDM_ADD and req_net is None:
              raise errors.OpPrereqError("If ip=pool, parameter network"
                                         " cannot be none",
                                         errors.ECODE_INVAL)
          else:
            if not netutils.IPAddress.IsValid(ip):
              raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                         errors.ECODE_INVAL)

      if constants.INIC_MAC in params:
        macaddr = params[constants.INIC_MAC]
        if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
          macaddr = utils.NormalizeAndValidateMac(macaddr)

        if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
                                     " modifying an existing NIC",
                                     errors.ECODE_INVAL)

  def CheckArguments(self):
    if not (self.op.nics or self.op.disks or self.op.disk_template or
            self.op.hvparams or self.op.beparams or self.op.os_name or
            self.op.osparams or self.op.offline is not None or
            self.op.runtime_mem or self.op.pnode):
      raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)

    if self.op.hvparams:
      CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
                           "hypervisor", "instance", "cluster")

    self.op.disks = self._UpgradeDiskNicMods(
      "disk", self.op.disks, opcodes.OpInstanceSetParams.TestDiskModifications)
    self.op.nics = self._UpgradeDiskNicMods(
      "NIC", self.op.nics, opcodes.OpInstanceSetParams.TestNicModifications)

    if self.op.disks and self.op.disk_template is not None:
      raise errors.OpPrereqError("Disk template conversion and other disk"
                                 " changes not supported at the same time",
                                 errors.ECODE_INVAL)

    if (self.op.disk_template and
        self.op.disk_template in constants.DTS_INT_MIRROR and
        self.op.remote_node is None):
      raise errors.OpPrereqError("Changing the disk template to a mirrored"
                                 " one requires specifying a secondary node",
                                 errors.ECODE_INVAL)

    # Check NIC modifications
    self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
                    self._VerifyNicModification)

    if self.op.pnode:
      (self.op.pnode_uuid, self.op.pnode) = \
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODEGROUP] = []
    # Can't even acquire node locks in shared mode as upcoming changes in
    # Ganeti 2.6 will start to modify the node object on disk conversion
    self.needed_locks[locking.LEVEL_NODE] = []
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
    # The node group is locked (shared) to look up the ipolicy
    self.share_locks[locking.LEVEL_NODEGROUP] = 1

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
      # Acquire locks for the instance's nodegroups optimistically. Needs
      # to be verified in CheckPrereq
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
        self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
    elif level == locking.LEVEL_NODE:
      self._LockInstancesNodes()
      if self.op.disk_template and self.op.remote_node:
        (self.op.remote_node_uuid, self.op.remote_node) = \
          ExpandNodeUuidAndName(self.cfg, self.op.remote_node_uuid,
                                self.op.remote_node)
        self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node_uuid)
    elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, primary and secondaries.

    """
    args = {}
    if constants.BE_MINMEM in self.be_new:
      args["minmem"] = self.be_new[constants.BE_MINMEM]
    if constants.BE_MAXMEM in self.be_new:
      args["maxmem"] = self.be_new[constants.BE_MAXMEM]
    if constants.BE_VCPUS in self.be_new:
      args["vcpus"] = self.be_new[constants.BE_VCPUS]
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
    # information at all.

    if self._new_nics is not None:
      nics = []

      for nic in self._new_nics:
        n = copy.deepcopy(nic)
        nicparams = self.cluster.SimpleFillNIC(n.nicparams)
        n.nicparams = nicparams
        nics.append(NICToTuple(self, n))

      args["nics"] = nics

    env = BuildInstanceHookEnvByObject(self, self.instance, override=args)
    if self.op.disk_template:
      env["NEW_DISK_TEMPLATE"] = self.op.disk_template
    if self.op.runtime_mem:
      env["RUNTIME_MEMORY"] = self.op.runtime_mem

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)

  def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
                              old_params, cluster, pnode_uuid):

    update_params_dict = dict([(key, params[key])
                               for key in constants.NICS_PARAMETERS
                               if key in params])

    req_link = update_params_dict.get(constants.NIC_LINK, None)
    req_mode = update_params_dict.get(constants.NIC_MODE, None)

    new_net_uuid = None
    new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid)
    if new_net_uuid_or_name:
      new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name)
      new_net_obj = self.cfg.GetNetwork(new_net_uuid)

    if old_net_uuid:
      old_net_obj = self.cfg.GetNetwork(old_net_uuid)

    if new_net_uuid:
      netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode_uuid)
      if not netparams:
        raise errors.OpPrereqError("No netparams found for the network"
                                   " %s, probably not connected" %
                                   new_net_obj.name, errors.ECODE_INVAL)
      new_params = dict(netparams)
    else:
      new_params = GetUpdatedParams(old_params, update_params_dict)

    utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)

    new_filled_params = cluster.SimpleFillNIC(new_params)
    objects.NIC.CheckParameterSyntax(new_filled_params)

    new_mode = new_filled_params[constants.NIC_MODE]
    if new_mode == constants.NIC_MODE_BRIDGED:
      bridge = new_filled_params[constants.NIC_LINK]
      msg = self.rpc.call_bridges_exist(pnode_uuid, [bridge]).fail_msg
      if msg:
        msg = "Error checking bridges on node '%s': %s" % \
                (self.cfg.GetNodeName(pnode_uuid), msg)
        if self.op.force:
          self.warn.append(msg)
        else:
          raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)

    elif new_mode == constants.NIC_MODE_ROUTED:
      ip = params.get(constants.INIC_IP, old_ip)
      if ip is None:
        raise errors.OpPrereqError("Cannot set the NIC IP address to None"
                                   " on a routed NIC", errors.ECODE_INVAL)

    elif new_mode == constants.NIC_MODE_OVS:
      # TODO: check OVS link
      self.LogInfo("OVS links are currently not checked for correctness")

    if constants.INIC_MAC in params:
      mac = params[constants.INIC_MAC]
      if mac is None:
        raise errors.OpPrereqError("Cannot unset the NIC MAC address",
                                   errors.ECODE_INVAL)
      elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        # otherwise generate the MAC address
        params[constants.INIC_MAC] = \
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
      else:
        # or validate/reserve the current one
        try:
          self.cfg.ReserveMAC(mac, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("MAC address '%s' already in use"
                                     " in cluster" % mac,
                                     errors.ECODE_NOTUNIQUE)
    elif new_net_uuid != old_net_uuid:

      def get_net_prefix(net_uuid):
        mac_prefix = None
        if net_uuid:
          nobj = self.cfg.GetNetwork(net_uuid)
          mac_prefix = nobj.mac_prefix

        return mac_prefix

      new_prefix = get_net_prefix(new_net_uuid)
      old_prefix = get_net_prefix(old_net_uuid)
      if old_prefix != new_prefix:
        params[constants.INIC_MAC] = \
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())

    # if there is a change in (ip, network) tuple
    new_ip = params.get(constants.INIC_IP, old_ip)
    if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
      if new_ip:
        # if IP is pool then require a network and generate one IP
        if new_ip.lower() == constants.NIC_IP_POOL:
          if new_net_uuid:
            try:
              new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("Unable to get a free IP"
                                         " from the address pool",
                                         errors.ECODE_STATE)
            self.LogInfo("Chose IP %s from network %s",
                         new_ip,
                         new_net_obj.name)
            params[constants.INIC_IP] = new_ip
          else:
            raise errors.OpPrereqError("ip=pool, but no network found",
                                       errors.ECODE_INVAL)
        # Reserve the new IP in the new network, if any
        elif new_net_uuid:
          try:
            self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId())
            self.LogInfo("Reserving IP %s in network %s",
                         new_ip, new_net_obj.name)
          except errors.ReservationError:
            raise errors.OpPrereqError("IP %s not available in network %s" %
                                       (new_ip, new_net_obj.name),
                                       errors.ECODE_NOTUNIQUE)
        # new network is None so check if new IP is a conflicting IP
        elif self.op.conflicts_check:
          _CheckForConflictingIp(self, new_ip, pnode_uuid)

      # release old IP if old network is not None
      if old_ip and old_net_uuid:
        try:
          self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId())
        except errors.AddressPoolError:
          logging.warning("Release IP %s not contained in network %s",
                          old_ip, old_net_obj.name)

    # there are no changes in (ip, network) tuple and old network is not None
    elif (old_net_uuid is not None and
          (req_link is not None or req_mode is not None)):
      raise errors.OpPrereqError("Not allowed to change link or mode of"
                                 " a NIC that is connected to a network",
                                 errors.ECODE_INVAL)

    private.params = new_params
    private.filled = new_filled_params

  def _PreCheckDiskTemplate(self, pnode_info):
    """CheckPrereq checks related to a new disk template."""
    # Arguments are passed to avoid configuration lookups
    pnode_uuid = self.instance.primary_node
    if self.instance.disk_template == self.op.disk_template:
      raise errors.OpPrereqError("Instance already has disk template %s" %
                                 self.instance.disk_template,
                                 errors.ECODE_INVAL)

    if not self.cluster.IsDiskTemplateEnabled(self.op.disk_template):
      raise errors.OpPrereqError("Disk template '%s' is not enabled for this"
                                 " cluster." % self.op.disk_template)

    if (self.instance.disk_template,
        self.op.disk_template) not in self._DISK_CONVERSIONS:
      raise errors.OpPrereqError("Unsupported disk template conversion from"
                                 " %s to %s" % (self.instance.disk_template,
                                                self.op.disk_template),
                                 errors.ECODE_INVAL)
    CheckInstanceState(self, self.instance, INSTANCE_DOWN,
                       msg="cannot change disk template")
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.remote_node_uuid == pnode_uuid:
        raise errors.OpPrereqError("Given new secondary node %s is the same"
                                   " as the primary node of the instance" %
                                   self.op.remote_node, errors.ECODE_STATE)
      CheckNodeOnline(self, self.op.remote_node_uuid)
      CheckNodeNotDrained(self, self.op.remote_node_uuid)
      # FIXME: here we assume that the old instance type is DT_PLAIN
      assert self.instance.disk_template == constants.DT_PLAIN
      disks = [{constants.IDISK_SIZE: d.size,
                constants.IDISK_VG: d.logical_id[0]}
               for d in self.instance.disks]
      required = ComputeDiskSizePerVG(self.op.disk_template, disks)
      CheckNodesFreeDiskPerVG(self, [self.op.remote_node_uuid], required)

      snode_info = self.cfg.GetNodeInfo(self.op.remote_node_uuid)
      snode_group = self.cfg.GetNodeGroup(snode_info.group)
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
                                                              snode_group)
      CheckTargetNodeIPolicy(self, ipolicy, self.instance, snode_info, self.cfg,
                             ignore=self.op.ignore_ipolicy)
      if pnode_info.group != snode_info.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    if not self.op.disk_template in constants.DTS_EXCL_STORAGE:
      # Make sure none of the nodes require exclusive storage
      nodes = [pnode_info]
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        assert snode_info
        nodes.append(snode_info)
      has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
      if compat.any(map(has_es, nodes)):
        errmsg = ("Cannot convert disk template from %s to %s when exclusive"
                  " storage is enabled" % (self.instance.disk_template,
                                           self.op.disk_template))
        raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)

  def _PreCheckDisks(self, ispec):
    """CheckPrereq checks related to disk changes.

    @type ispec: dict
    @param ispec: instance specs to be updated with the new disks

    """
    self.diskparams = self.cfg.GetInstanceDiskParams(self.instance)

    excl_stor = compat.any(
      rpc.GetExclusiveStorageForNodes(self.cfg,
                                      self.instance.all_nodes).values()
      )

    # Check disk modifications. This is done here and not in CheckArguments
    # (as with NICs), because we need to know the instance's disk template
    ver_fn = lambda op, par: self._VerifyDiskModification(op, par, excl_stor)
    if self.instance.disk_template == constants.DT_EXT:
      self._CheckMods("disk", self.op.disks, {}, ver_fn)
    else:
      self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
                      ver_fn)

    self.diskmod = _PrepareContainerMods(self.op.disks, None)

    # Check the validity of the `provider' parameter
    if self.instance.disk_template in constants.DT_EXT:
      for mod in self.diskmod:
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
        if mod[0] == constants.DDM_ADD:
          if ext_provider is None:
            raise errors.OpPrereqError("Instance template is '%s' and parameter"
                                       " '%s' missing, during disk add" %
                                       (constants.DT_EXT,
                                        constants.IDISK_PROVIDER),
                                       errors.ECODE_NOENT)
        elif mod[0] == constants.DDM_MODIFY:
          if ext_provider:
            raise errors.OpPrereqError("Parameter '%s' is invalid during disk"
                                       " modification" %
                                       constants.IDISK_PROVIDER,
                                       errors.ECODE_INVAL)
    else:
      for mod in self.diskmod:
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
        if ext_provider is not None:
          raise errors.OpPrereqError("Parameter '%s' is only valid for"
                                     " instances of type '%s'" %
                                     (constants.IDISK_PROVIDER,
                                      constants.DT_EXT),
                                     errors.ECODE_INVAL)

    if self.op.disks and self.instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Disk operations not supported for"
                                 " diskless instances", errors.ECODE_INVAL)

    def _PrepareDiskMod(_, disk, params, __):
      disk.name = params.get(constants.IDISK_NAME, None)

    # Verify disk changes (operating on a copy)
    disks = copy.deepcopy(self.instance.disks)
    _ApplyContainerMods("disk", disks, None, self.diskmod, None,
                        _PrepareDiskMod, None)
    utils.ValidateDeviceNames("disk", disks)
    if len(disks) > constants.MAX_DISKS:
      raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
                                 " more" % constants.MAX_DISKS,
                                 errors.ECODE_STATE)
    disk_sizes = [disk.size for disk in self.instance.disks]
    disk_sizes.extend(params["size"] for (op, idx, params, private) in
                      self.diskmod if op == constants.DDM_ADD)
    ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
    ispec[constants.ISPEC_DISK_SIZE] = disk_sizes

    if self.op.offline is not None and self.op.offline:
      CheckInstanceState(self, self.instance, CAN_CHANGE_INSTANCE_OFFLINE,
                         msg="can't change to offline")

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the instance list against the existing names.

    """
    assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    self.cluster = self.cfg.GetClusterInfo()

    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    pnode_uuid = self.instance.primary_node

    self.warn = []

    if (self.op.pnode_uuid is not None and self.op.pnode_uuid != pnode_uuid and
        not self.op.force):
      # verify that the instance is not up
      instance_info = self.rpc.call_instance_info(
          pnode_uuid, self.instance.name, self.instance.hypervisor,
          self.instance.hvparams)
      if instance_info.fail_msg:
        self.warn.append("Can't get instance runtime information: %s" %
                         instance_info.fail_msg)
      elif instance_info.payload:
        raise errors.OpPrereqError("Instance is still running on %s" %
                                   self.cfg.GetNodeName(pnode_uuid),
                                   errors.ECODE_STATE)

    assert pnode_uuid in self.owned_locks(locking.LEVEL_NODE)
    node_uuids = list(self.instance.all_nodes)
    pnode_info = self.cfg.GetNodeInfo(pnode_uuid)

    #_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
    assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
    group_info = self.cfg.GetNodeGroup(pnode_info.group)

    # dictionary with instance information after the modification
    ispec = {}

    # Prepare NIC modifications
    self.nicmod = _PrepareContainerMods(self.op.nics, _InstNicModPrivate)

    # OS change
    if self.op.os_name and not self.op.force:
      CheckNodeHasOS(self, self.instance.primary_node, self.op.os_name,
                     self.op.force_variant)
      instance_os = self.op.os_name
    else:
      instance_os = self.instance.os

    assert not (self.op.disk_template and self.op.disks), \
      "Can't modify disk template and apply disk changes at the same time"

    if self.op.disk_template:
      self._PreCheckDiskTemplate(pnode_info)

    self._PreCheckDisks(ispec)

    # hvparams processing
    if self.op.hvparams:
      hv_type = self.instance.hypervisor
      i_hvdict = GetUpdatedParams(self.instance.hvparams, self.op.hvparams)
      utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
      hv_new = self.cluster.SimpleFillHV(hv_type, self.instance.os, i_hvdict)

      # local check
      hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
      CheckHVParams(self, node_uuids, self.instance.hypervisor, hv_new)
      self.hv_proposed = self.hv_new = hv_new # the new actual values
      self.hv_inst = i_hvdict # the new dict (without defaults)
    else:
      self.hv_proposed = self.cluster.SimpleFillHV(self.instance.hypervisor,
                                                   self.instance.os,
                                                   self.instance.hvparams)
      self.hv_new = self.hv_inst = {}

    # beparams processing
    if self.op.beparams:
      i_bedict = GetUpdatedParams(self.instance.beparams, self.op.beparams,
                                  use_none=True)
      objects.UpgradeBeParams(i_bedict)
      utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
      be_new = self.cluster.SimpleFillBE(i_bedict)
      self.be_proposed = self.be_new = be_new # the new actual values
      self.be_inst = i_bedict # the new dict (without defaults)
    else:
      self.be_new = self.be_inst = {}
      self.be_proposed = self.cluster.SimpleFillBE(self.instance.beparams)
    be_old = self.cluster.FillBE(self.instance)

    # CPU param validation -- checking every time a parameter is
    # changed to cover all cases where either CPU mask or vcpus have
    # changed
    if (constants.BE_VCPUS in self.be_proposed and
        constants.HV_CPU_MASK in self.hv_proposed):
      cpu_list = \
        utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
      # Verify mask is consistent with number of vCPUs. Can skip this
      # test if only 1 entry in the CPU mask, which means same mask
      # is applied to all vCPUs.
      if (len(cpu_list) > 1 and
          len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
        raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
                                   " CPU mask [%s]" %
                                   (self.be_proposed[constants.BE_VCPUS],
                                    self.hv_proposed[constants.HV_CPU_MASK]),
                                   errors.ECODE_INVAL)

      # Only perform this test if a new CPU mask is given
      if constants.HV_CPU_MASK in self.hv_new:
        # Calculate the largest CPU number requested
        max_requested_cpu = max(map(max, cpu_list))
        # Check that all of the instance's nodes have enough physical CPUs to
        # satisfy the requested CPU mask
        hvspecs = [(self.instance.hypervisor,
                    self.cfg.GetClusterInfo()
                      .hvparams[self.instance.hypervisor])]
        _CheckNodesPhysicalCPUs(self, self.instance.all_nodes,
                                max_requested_cpu + 1,
                                hvspecs)

    # osparams processing
    if self.op.osparams:
      i_osdict = GetUpdatedParams(self.instance.osparams, self.op.osparams)
      CheckOSParams(self, True, node_uuids, instance_os, i_osdict)
      self.os_inst = i_osdict # the new dict (without defaults)
    else:
      self.os_inst = {}

    #TODO(dynmem): do the appropriate check involving MINMEM
    if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
        be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
      mem_check_list = [pnode_uuid]
      if be_new[constants.BE_AUTO_BALANCE]:
        # either we changed auto_balance to yes or it was from before
        mem_check_list.extend(self.instance.secondary_nodes)
      instance_info = self.rpc.call_instance_info(
          pnode_uuid, self.instance.name, self.instance.hypervisor,
          self.instance.hvparams)
      hvspecs = [(self.instance.hypervisor,
                  self.cluster.hvparams[self.instance.hypervisor])]
      nodeinfo = self.rpc.call_node_info(mem_check_list, None,
                                         hvspecs)
      pninfo = nodeinfo[pnode_uuid]
      msg = pninfo.fail_msg
      if msg:
        # Assume the primary node is unreachable and go ahead
        self.warn.append("Can't get info from primary node %s: %s" %
                         (self.cfg.GetNodeName(pnode_uuid), msg))
      else:
        (_, _, (pnhvinfo, )) = pninfo.payload
        if not isinstance(pnhvinfo.get("memory_free", None), int):
          self.warn.append("Node data from primary node %s doesn't contain"
                           " free memory information" %
                           self.cfg.GetNodeName(pnode_uuid))
        elif instance_info.fail_msg:
          self.warn.append("Can't get instance runtime information: %s" %
                           instance_info.fail_msg)
        else:
          if instance_info.payload:
            current_mem = int(instance_info.payload["memory"])
          else:
            # Assume instance not running
            # (there is a slight race condition here, but it's not very
            # probable, and we have no other way to check)
            # TODO: Describe race condition
            current_mem = 0
          #TODO(dynmem): do the appropriate check involving MINMEM
          miss_mem = (be_new[constants.BE_MAXMEM] - current_mem -
                      pnhvinfo["memory_free"])
          if miss_mem > 0:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from starting, due to %d MB of memory"
                                       " missing on its primary node" %
                                       miss_mem, errors.ECODE_NORES)

      if be_new[constants.BE_AUTO_BALANCE]:
        for node_uuid, nres in nodeinfo.items():
          if node_uuid not in self.instance.secondary_nodes:
            continue
          nres.Raise("Can't get info from secondary node %s" %
                     self.cfg.GetNodeName(node_uuid), prereq=True,
                     ecode=errors.ECODE_STATE)
          (_, _, (nhvinfo, )) = nres.payload
          if not isinstance(nhvinfo.get("memory_free", None), int):
            raise errors.OpPrereqError("Secondary node %s didn't return free"
                                       " memory information" %
                                       self.cfg.GetNodeName(node_uuid),
                                       errors.ECODE_STATE)
          #TODO(dynmem): do the appropriate check involving MINMEM
          elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from failover to its secondary node"
                                       " %s, due to not enough memory" %
                                       self.cfg.GetNodeName(node_uuid),
                                       errors.ECODE_STATE)

    if self.op.runtime_mem:
      remote_info = self.rpc.call_instance_info(
         self.instance.primary_node, self.instance.name,
         self.instance.hypervisor,
         self.cluster.hvparams[self.instance.hypervisor])
      remote_info.Raise("Error checking node %s" %
                        self.cfg.GetNodeName(self.instance.primary_node))
      if not remote_info.payload: # not running already
        raise errors.OpPrereqError("Instance %s is not running" %
                                   self.instance.name, errors.ECODE_STATE)

      current_memory = remote_info.payload["memory"]
      if (not self.op.force and
           (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
            self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
        raise errors.OpPrereqError("Instance %s must have memory between %d"
                                   " and %d MB of memory unless --force is"
                                   " given" %
                                   (self.instance.name,
                                    self.be_proposed[constants.BE_MINMEM],
                                    self.be_proposed[constants.BE_MAXMEM]),
                                   errors.ECODE_INVAL)

      delta = self.op.runtime_mem - current_memory
      if delta > 0:
        CheckNodeFreeMemory(
            self, self.instance.primary_node,
            "ballooning memory for instance %s" % self.instance.name, delta,
            self.instance.hypervisor,
            self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])

    # make self.cluster visible in the functions below
    cluster = self.cluster

    def _PrepareNicCreate(_, params, private):
      self._PrepareNicModification(params, private, None, None,
                                   {}, cluster, pnode_uuid)
      return (None, None)

    def _PrepareNicMod(_, nic, params, private):
      self._PrepareNicModification(params, private, nic.ip, nic.network,
                                   nic.nicparams, cluster, pnode_uuid)
      return None

    def _PrepareNicRemove(_, params, __):
      ip = params.ip
      net = params.network
      if net is not None and ip is not None:
        self.cfg.ReleaseIp(net, ip, self.proc.GetECId())

    # Verify NIC changes (operating on copy)
    nics = self.instance.nics[:]
    _ApplyContainerMods("NIC", nics, None, self.nicmod,
                        _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
    if len(nics) > constants.MAX_NICS:
      raise errors.OpPrereqError("Instance has too many network interfaces"
                                 " (%d), cannot add more" % constants.MAX_NICS,
                                 errors.ECODE_STATE)

    # Pre-compute NIC changes (necessary to use result in hooks)
    self._nic_chgdesc = []
    if self.nicmod:
      # Operate on copies as this is still in prereq
      nics = [nic.Copy() for nic in self.instance.nics]
      _ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
                          self._CreateNewNic, self._ApplyNicMods, None)
      # Verify that NIC names are unique and valid
      utils.ValidateDeviceNames("NIC", nics)
      self._new_nics = nics
      ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
    else:
      self._new_nics = None
      ispec[constants.ISPEC_NIC_COUNT] = len(self.instance.nics)

    if not self.op.ignore_ipolicy:
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
                                                              group_info)

      # Fill ispec with backend parameters
      ispec[constants.ISPEC_SPINDLE_USE] = \
        self.be_new.get(constants.BE_SPINDLE_USE, None)
      ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS,
                                                         None)

      # Copy ispec to verify parameters with min/max values separately
      if self.op.disk_template:
        new_disk_template = self.op.disk_template
      else:
        new_disk_template = self.instance.disk_template
      ispec_max = ispec.copy()
      ispec_max[constants.ISPEC_MEM_SIZE] = \
        self.be_new.get(constants.BE_MAXMEM, None)
      res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,
                                                     new_disk_template)
      ispec_min = ispec.copy()
      ispec_min[constants.ISPEC_MEM_SIZE] = \
        self.be_new.get(constants.BE_MINMEM, None)
      res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,
                                                     new_disk_template)

      if (res_max or res_min):
        # FIXME: Improve error message by including information about whether
        # the upper or lower limit of the parameter fails the ipolicy.
        msg = ("Instance allocation to group %s (%s) violates policy: %s" %
               (group_info, group_info.name,
                utils.CommaJoin(set(res_max + res_min))))
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

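  # Note: the two conversion helpers below are registered in the
  # _DISK_CONVERSIONS map at the bottom of this class; Exec() shuts down the
  # instance's disks and then dispatches on the
  # (old disk template, new disk template) pair.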
  def _ConvertPlainToDrbd(self, feedback_fn):
    """Converts an instance from plain to drbd.

    """
    feedback_fn("Converting template to drbd")
    pnode_uuid = self.instance.primary_node
    snode_uuid = self.op.remote_node_uuid

    assert self.instance.disk_template == constants.DT_PLAIN

    # create a fake disk info for _GenerateDiskTemplate
    disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
                  constants.IDISK_VG: d.logical_id[0],
                  constants.IDISK_NAME: d.name}
                 for d in self.instance.disks]
    new_disks = GenerateDiskTemplate(self, self.op.disk_template,
                                     self.instance.uuid, pnode_uuid,
                                     [snode_uuid], disk_info, None, None, 0,
                                     feedback_fn, self.diskparams)
    anno_disks = rpc.AnnotateDiskParams(constants.DT_DRBD8, new_disks,
                                        self.diskparams)
    p_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, pnode_uuid)
    s_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, snode_uuid)
    info = GetInstanceInfoText(self.instance)
    feedback_fn("Creating additional volumes...")
    # first, create the missing data and meta devices
    for disk in anno_disks:
      # unfortunately this is... not too nice
      CreateSingleBlockDev(self, pnode_uuid, self.instance, disk.children[1],
                           info, True, p_excl_stor)
      for child in disk.children:
        CreateSingleBlockDev(self, snode_uuid, self.instance, child, info, True,
                             s_excl_stor)
    # at this stage, all new LVs have been created, we can rename the
    # old ones
    feedback_fn("Renaming original volumes...")
    rename_list = [(o, n.children[0].logical_id)
                   for (o, n) in zip(self.instance.disks, new_disks)]
    result = self.rpc.call_blockdev_rename(pnode_uuid, rename_list)
    result.Raise("Failed to rename original LVs")

    feedback_fn("Initializing DRBD devices...")
    # all child devices are in place, we can now create the DRBD devices
    try:
      for disk in anno_disks:
        for (node_uuid, excl_stor) in [(pnode_uuid, p_excl_stor),
                                       (snode_uuid, s_excl_stor)]:
          f_create = node_uuid == pnode_uuid
          CreateSingleBlockDev(self, node_uuid, self.instance, disk, info,
                               f_create, excl_stor)
    except errors.GenericError, e:
      feedback_fn("Initializing of DRBD devices failed;"
                  " renaming back original volumes...")
      for disk in new_disks:
        self.cfg.SetDiskID(disk, pnode_uuid)
      rename_back_list = [(n.children[0], o.logical_id)
                          for (n, o) in zip(new_disks, self.instance.disks)]
      result = self.rpc.call_blockdev_rename(pnode_uuid, rename_back_list)
      result.Raise("Failed to rename LVs back after error %s" % str(e))
      raise

    # at this point, the instance has been modified
    self.instance.disk_template = constants.DT_DRBD8
    self.instance.disks = new_disks
    self.cfg.Update(self.instance, feedback_fn)

    # Release node locks while waiting for sync
    ReleaseLocks(self, locking.LEVEL_NODE)

    # disks are created, waiting for sync
    disk_abort = not WaitForSync(self, self.instance,
                                 oneshot=not self.op.wait_for_sync)
    if disk_abort:
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance, please cleanup manually")

    # Node resource locks will be released by caller

  def _ConvertDrbdToPlain(self, feedback_fn):
    """Converts an instance from drbd to plain.

    """
    assert len(self.instance.secondary_nodes) == 1
    assert self.instance.disk_template == constants.DT_DRBD8

    pnode_uuid = self.instance.primary_node
    snode_uuid = self.instance.secondary_nodes[0]
    feedback_fn("Converting template to plain")

    old_disks = AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
    new_disks = [d.children[0] for d in self.instance.disks]

    # copy over size, mode and name
    for parent, child in zip(old_disks, new_disks):
      child.size = parent.size
      child.mode = parent.mode
      child.name = parent.name

    # this is a DRBD disk, return its port to the pool
    # NOTE: this must be done right before the call to cfg.Update!
    for disk in old_disks:
      tcp_port = disk.logical_id[2]
      self.cfg.AddTcpUdpPort(tcp_port)

    # update instance structure
    self.instance.disks = new_disks
    self.instance.disk_template = constants.DT_PLAIN
    _UpdateIvNames(0, self.instance.disks)
    self.cfg.Update(self.instance, feedback_fn)

    # Release locks in case removing disks takes a while
    ReleaseLocks(self, locking.LEVEL_NODE)

    feedback_fn("Removing volumes on the secondary node...")
    for disk in old_disks:
      self.cfg.SetDiskID(disk, snode_uuid)
      msg = self.rpc.call_blockdev_remove(snode_uuid, disk).fail_msg
      if msg:
        self.LogWarning("Could not remove block device %s on node %s,"
                        " continuing anyway: %s", disk.iv_name,
                        self.cfg.GetNodeName(snode_uuid), msg)

    feedback_fn("Removing unneeded volumes on the primary node...")
    for idx, disk in enumerate(old_disks):
      meta = disk.children[1]
      self.cfg.SetDiskID(meta, pnode_uuid)
      msg = self.rpc.call_blockdev_remove(pnode_uuid, meta).fail_msg
      if msg:
        self.LogWarning("Could not remove metadata for disk %d on node %s,"
                        " continuing anyway: %s", idx,
                        self.cfg.GetNodeName(pnode_uuid), msg)

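  # The methods below are used as callbacks by _ApplyContainerMods (invoked
  # from CheckPrereq and Exec) to create, modify and remove the instance's
  # disks and NICs; the change descriptions returned by the create/modify
  # callbacks end up in the LU result.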
  def _CreateNewDisk(self, idx, params, _):
    """Creates a new disk.

    """
    # add a new disk
    if self.instance.disk_template in constants.DTS_FILEBASED:
      (file_driver, file_path) = self.instance.disks[0].logical_id
      file_path = os.path.dirname(file_path)
    else:
      file_driver = file_path = None

    disk = \
      GenerateDiskTemplate(self, self.instance.disk_template,
                           self.instance.uuid, self.instance.primary_node,
                           self.instance.secondary_nodes, [params], file_path,
                           file_driver, idx, self.Log, self.diskparams)[0]

    new_disks = CreateDisks(self, self.instance, disks=[disk])

    if self.cluster.prealloc_wipe_disks:
      # Wipe new disk
      WipeOrCleanupDisks(self, self.instance,
                         disks=[(idx, disk, 0)],
                         cleanup=new_disks)

    return (disk, [
      ("disk/%d" % idx, "add:size=%s,mode=%s" % (disk.size, disk.mode)),
      ])

  @staticmethod
  def _ModifyDisk(idx, disk, params, _):
    """Modifies a disk.

    """
    changes = []
    mode = params.get(constants.IDISK_MODE, None)
    if mode:
      disk.mode = mode
      changes.append(("disk.mode/%d" % idx, disk.mode))

    name = params.get(constants.IDISK_NAME, None)
    disk.name = name
    changes.append(("disk.name/%d" % idx, disk.name))

    return changes

  def _RemoveDisk(self, idx, root, _):
    """Removes a disk.

    """
    (anno_disk,) = AnnotateDiskParams(self.instance, [root], self.cfg)
    for node_uuid, disk in anno_disk.ComputeNodeTree(
                             self.instance.primary_node):
      self.cfg.SetDiskID(disk, node_uuid)
      msg = self.rpc.call_blockdev_remove(node_uuid, disk).fail_msg
      if msg:
        self.LogWarning("Could not remove disk/%d on node '%s': %s,"
                        " continuing anyway", idx,
                        self.cfg.GetNodeName(node_uuid), msg)

    # if this is a DRBD disk, return its port to the pool
    if root.dev_type in constants.LDS_DRBD:
      self.cfg.AddTcpUdpPort(root.logical_id[2])

  def _CreateNewNic(self, idx, params, private):
    """Creates data structure for a new network interface.

    """
    mac = params[constants.INIC_MAC]
    ip = params.get(constants.INIC_IP, None)
    net = params.get(constants.INIC_NETWORK, None)
    name = params.get(constants.INIC_NAME, None)
    net_uuid = self.cfg.LookupNetwork(net)
    #TODO: not private.filled?? can a nic have no nicparams??
    nicparams = private.filled
    nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name,
                       nicparams=nicparams)
    nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())

    return (nobj, [
      ("nic.%d" % idx,
       "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
       (mac, ip, private.filled[constants.NIC_MODE],
       private.filled[constants.NIC_LINK],
       net)),
      ])

  def _ApplyNicMods(self, idx, nic, params, private):
    """Modifies a network interface.

    """
    changes = []

    for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:
      if key in params:
        changes.append(("nic.%s/%d" % (key, idx), params[key]))
        setattr(nic, key, params[key])

    new_net = params.get(constants.INIC_NETWORK, nic.network)
    new_net_uuid = self.cfg.LookupNetwork(new_net)
    if new_net_uuid != nic.network:
      changes.append(("nic.network/%d" % idx, new_net))
      nic.network = new_net_uuid

    if private.filled:
      nic.nicparams = private.filled

      for (key, val) in nic.nicparams.items():
        changes.append(("nic.%s/%d" % (key, idx), val))

    return changes

  def Exec(self, feedback_fn):
    """Modifies an instance.

    All parameters take effect only at the next restart of the instance.

    """
    # Process here the warnings from CheckPrereq, as we don't have a
    # feedback_fn there.
    # TODO: Replace with self.LogWarning
    for warn in self.warn:
      feedback_fn("WARNING: %s" % warn)

    assert ((self.op.disk_template is None) ^
            bool(self.owned_locks(locking.LEVEL_NODE_RES))), \
      "Not owning any node resource locks"

    result = []

    # New primary node
    if self.op.pnode_uuid:
      self.instance.primary_node = self.op.pnode_uuid

    # runtime memory
    if self.op.runtime_mem:
      rpcres = self.rpc.call_instance_balloon_memory(self.instance.primary_node,
                                                     self.instance,
                                                     self.op.runtime_mem)
      rpcres.Raise("Cannot modify instance runtime memory")
      result.append(("runtime_memory", self.op.runtime_mem))

    # Apply disk changes
    _ApplyContainerMods("disk", self.instance.disks, result, self.diskmod,
                        self._CreateNewDisk, self._ModifyDisk,
                        self._RemoveDisk)
    _UpdateIvNames(0, self.instance.disks)

    if self.op.disk_template:
      if __debug__:
        check_nodes = set(self.instance.all_nodes)
        if self.op.remote_node_uuid:
          check_nodes.add(self.op.remote_node_uuid)
        for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
          owned = self.owned_locks(level)
          assert not (check_nodes - owned), \
            ("Not owning the correct locks, owning %r, expected at least %r" %
             (owned, check_nodes))

      r_shut = ShutdownInstanceDisks(self, self.instance)
      if not r_shut:
        raise errors.OpExecError("Cannot shutdown instance disks, unable to"
                                 " proceed with disk template conversion")
      mode = (self.instance.disk_template, self.op.disk_template)
      try:
        self._DISK_CONVERSIONS[mode](self, feedback_fn)
      except:
        self.cfg.ReleaseDRBDMinors(self.instance.uuid)
        raise
      result.append(("disk_template", self.op.disk_template))

      assert self.instance.disk_template == self.op.disk_template, \
        ("Expected disk template '%s', found '%s'" %
         (self.op.disk_template, self.instance.disk_template))

    # Release node and resource locks if there are any (they might already have
    # been released during disk conversion)
    ReleaseLocks(self, locking.LEVEL_NODE)
    ReleaseLocks(self, locking.LEVEL_NODE_RES)

    # Apply NIC changes
    if self._new_nics is not None:
      self.instance.nics = self._new_nics
      result.extend(self._nic_chgdesc)

    # hvparams changes
    if self.op.hvparams:
      self.instance.hvparams = self.hv_inst
      for key, val in self.op.hvparams.iteritems():
        result.append(("hv/%s" % key, val))

    # beparams changes
    if self.op.beparams:
      self.instance.beparams = self.be_inst
      for key, val in self.op.beparams.iteritems():
        result.append(("be/%s" % key, val))

    # OS change
    if self.op.os_name:
      self.instance.os = self.op.os_name

    # osparams changes
    if self.op.osparams:
      self.instance.osparams = self.os_inst
      for key, val in self.op.osparams.iteritems():
        result.append(("os/%s" % key, val))

    if self.op.offline is None:
      # Ignore
      pass
    elif self.op.offline:
      # Mark instance as offline
      self.cfg.MarkInstanceOffline(self.instance.uuid)
      result.append(("admin_state", constants.ADMINST_OFFLINE))
    else:
      # Mark instance as online, but stopped
      self.cfg.MarkInstanceDown(self.instance.uuid)
      result.append(("admin_state", constants.ADMINST_DOWN))

    self.cfg.Update(self.instance, feedback_fn, self.proc.GetECId())

    assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
                self.owned_locks(locking.LEVEL_NODE)), \
      "All node locks should have been released by now"

    return result

  _DISK_CONVERSIONS = {
    (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
    (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
    }


class LUInstanceChangeGroup(LogicalUnit):
  HPATH = "instance-change-group"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self.share_locks = ShareAll()

    self.needed_locks = {
      locking.LEVEL_NODEGROUP: [],
      locking.LEVEL_NODE: [],
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
      }

    self._ExpandAndLockInstance()

    if self.op.target_groups:
      self.req_target_uuids = map(self.cfg.LookupNodeGroup,
                                  self.op.target_groups)
    else:
      self.req_target_uuids = None

    self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]

      if self.req_target_uuids:
        lock_groups = set(self.req_target_uuids)

        # Lock all groups used by instance optimistically; this requires going
        # via the node before it's locked, requiring verification later on
        instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
        lock_groups.update(instance_groups)
      else:
        # No target groups, need to lock all of them
        lock_groups = locking.ALL_SET

      self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups

    elif level == locking.LEVEL_NODE:
      if self.req_target_uuids:
        # Lock all nodes used by instances
        self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
        self._LockInstancesNodes()

        # Lock all nodes in all potential target groups
        lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
                       self.cfg.GetInstanceNodeGroups(self.op.instance_uuid))
        member_nodes = [node_uuid
                        for group in lock_groups
                        for node_uuid in self.cfg.GetNodeGroup(group).members]
        self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
      else:
        # Lock all nodes as all groups are potential targets
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

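  # CheckPrereq verifies the group and node locks that DeclareLocks acquired
  # optimistically, and computes the set of valid target groups for the
  # instance.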
  def CheckPrereq(self):
    owned_instance_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))

    assert (self.req_target_uuids is None or
            owned_groups.issuperset(self.req_target_uuids))
    assert owned_instance_names == set([self.op.instance_name])

    # Get instance information
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)

    # Check if node groups for locked instance are still correct
    assert owned_nodes.issuperset(self.instance.all_nodes), \
      ("Instance %s's nodes changed while we kept the lock" %
       self.op.instance_name)

    inst_groups = CheckInstanceNodeGroups(self.cfg, self.op.instance_uuid,
                                          owned_groups)

    if self.req_target_uuids:
      # User requested specific target groups
      self.target_uuids = frozenset(self.req_target_uuids)
    else:
      # All groups except those used by the instance are potential targets
      self.target_uuids = owned_groups - inst_groups

    conflicting_groups = self.target_uuids & inst_groups
    if conflicting_groups:
      raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
                                 " used by the instance '%s'" %
                                 (utils.CommaJoin(conflicting_groups),
                                  self.op.instance_name),
                                 errors.ECODE_INVAL)

    if not self.target_uuids:
      raise errors.OpPrereqError("There are no possible target groups",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    assert self.target_uuids

    env = {
      "TARGET_GROUPS": " ".join(self.target_uuids),
      }

    env.update(BuildInstanceHookEnvByObject(self, self.instance))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()
    return ([mn], [mn])

  def Exec(self, feedback_fn):
    instances = list(self.owned_locks(locking.LEVEL_INSTANCE))

    assert instances == [self.op.instance_name], "Instance not locked"

    req = iallocator.IAReqGroupChange(instances=instances,
                                      target_groups=list(self.target_uuids))
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute solution for changing group of"
                                 " instance '%s' using iallocator '%s': %s" %
                                 (self.op.instance_name, self.op.iallocator,
                                  ial.info), errors.ECODE_NORES)

    jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False)

    self.LogInfo("Iallocator returned %s job(s) for changing group of"
                 " instance '%s'", len(jobs), self.op.instance_name)

    return ResultWithJobs(jobs)