#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Logical units dealing with instances."""

import OpenSSL
import copy
import logging
import os

from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import ht
from ganeti import hypervisor
from ganeti import locking
from ganeti.masterd import iallocator
from ganeti import masterd
from ganeti import netutils
from ganeti import objects
from ganeti import opcodes
from ganeti import pathutils
from ganeti import rpc
from ganeti import utils

from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs

from ganeti.cmdlib.common import INSTANCE_DOWN, \
  INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, CheckNodeOnline, \
  ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
  LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
  IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
  AnnotateDiskParams, GetUpdatedParams, ExpandInstanceUuidAndName, \
  ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeUuidAndName
from ganeti.cmdlib.instance_storage import CreateDisks, \
  CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, WaitForSync, \
  IsExclusiveStorageEnabledNodeUuid, CreateSingleBlockDev, ComputeDisks, \
  CheckRADOSFreeSpace, ComputeDiskSizePerVG, GenerateDiskTemplate, \
  StartInstanceDisks, ShutdownInstanceDisks, AssembleInstanceDisks, \
  CheckSpindlesExclusiveStorage
from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
  GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
  NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
  ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
  GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
  CheckInstanceBridgesExist, CheckNicsBridgesExist, CheckNodeHasOS

import ganeti.masterd.instance


#: Type description for changes as returned by L{_ApplyContainerMods}'s
#: callbacks
_TApplyContModsCbChanges = \
  ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
    ht.TNonEmptyString,
    ht.TAny,
    ])))
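
# Illustrative sketch only (not part of the module logic): a value matching
# the type above is either None or a list of two-item changes such as
#   [("add", nic_object), ("mode", "routed")]
# where the first element is a non-empty description string and the second
# can be anything; the concrete strings and objects here are made-up
# examples, not values produced by any particular callback.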


def _CheckHostnameSane(lu, name):
  """Ensures that a given hostname resolves to a 'sane' name.

  The given name is required to be a prefix of the resolved hostname,
  to prevent accidental mismatches.

  @param lu: the logical unit on behalf of which we're checking
  @param name: the name we should resolve and check
  @return: the resolved hostname object

  """
  hostname = netutils.GetHostname(name=name)
  if hostname.name != name:
    lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name)
  if not utils.MatchNameComponent(name, [hostname.name]):
    raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
                                " same as given hostname '%s'") %
                               (hostname.name, name), errors.ECODE_INVAL)
  return hostname
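
# Illustrative sketch only (the names below are examples, not values from a
# real cluster): if "inst1" resolves to "inst1.example.com", the prefix check
# passes and the resolved hostname object is returned; if "inst1" resolved to
# "other.example.com" instead, an OpPrereqError would be raised because the
# given name is not a component prefix of the resolved one.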


def _CheckOpportunisticLocking(op):
  """Generate error if opportunistic locking is not possible.

  """
  if op.opportunistic_locking and not op.iallocator:
    raise errors.OpPrereqError("Opportunistic locking is only available in"
                               " combination with an instance allocator",
                               errors.ECODE_INVAL)


def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_name_whitelist):
  """Wrapper around IAReqInstanceAlloc.

  @param op: The instance opcode
  @param disks: The computed disks
  @param nics: The computed nics
  @param beparams: The fully filled beparams
  @param node_name_whitelist: List of nodes which should appear as online to the
    allocator (unless the node is already marked offline)

  @returns: A filled L{iallocator.IAReqInstanceAlloc}

  """
  spindle_use = beparams[constants.BE_SPINDLE_USE]
  return iallocator.IAReqInstanceAlloc(name=op.instance_name,
                                       disk_template=op.disk_template,
                                       tags=op.tags,
                                       os=op.os_type,
                                       vcpus=beparams[constants.BE_VCPUS],
                                       memory=beparams[constants.BE_MAXMEM],
                                       spindle_use=spindle_use,
                                       disks=disks,
                                       nics=[n.ToDict() for n in nics],
                                       hypervisor=op.hypervisor,
                                       node_whitelist=node_name_whitelist)


def _ComputeFullBeParams(op, cluster):
  """Computes the full beparams.

  @param op: The instance opcode
  @param cluster: The cluster config object

  @return: The fully filled beparams

  """
  default_beparams = cluster.beparams[constants.PP_DEFAULT]
  for param, value in op.beparams.iteritems():
    if value == constants.VALUE_AUTO:
      op.beparams[param] = default_beparams[param]
  objects.UpgradeBeParams(op.beparams)
  utils.ForceDictType(op.beparams, constants.BES_PARAMETER_TYPES)
  return cluster.SimpleFillBE(op.beparams)
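
# Illustrative sketch only (the numbers are made up): with cluster defaults
# of maxmem=1024 and vcpus=1, an opcode carrying beparams
# {"vcpus": "auto", "maxmem": 2048} has the "auto" value replaced by the
# cluster default before the dict is upgraded, type-checked and filled, so
# the returned beparams would contain vcpus=1 and maxmem=2048.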


def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
  """Computes the nics.

  @param op: The instance opcode
  @param cluster: Cluster configuration object
  @param default_ip: The default ip to assign
  @param cfg: An instance of the configuration object
  @param ec_id: Execution context ID

  @returns: The built up NICs

  """
  nics = []
  for nic in op.nics:
    nic_mode_req = nic.get(constants.INIC_MODE, None)
    nic_mode = nic_mode_req
    if nic_mode is None or nic_mode == constants.VALUE_AUTO:
      nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]

    net = nic.get(constants.INIC_NETWORK, None)
    link = nic.get(constants.NIC_LINK, None)
    ip = nic.get(constants.INIC_IP, None)

    if net is None or net.lower() == constants.VALUE_NONE:
      net = None
    else:
      if nic_mode_req is not None or link is not None:
        raise errors.OpPrereqError("If network is given, no mode or link"
                                   " is allowed to be passed",
                                   errors.ECODE_INVAL)

    # ip validity checks
    if ip is None or ip.lower() == constants.VALUE_NONE:
      nic_ip = None
    elif ip.lower() == constants.VALUE_AUTO:
      if not op.name_check:
        raise errors.OpPrereqError("IP address set to auto but name checks"
                                   " have been skipped",
                                   errors.ECODE_INVAL)
      nic_ip = default_ip
    else:
      # We defer pool operations until later, so that the iallocator has
      # filled in the instance's node(s)
      if ip.lower() == constants.NIC_IP_POOL:
        if net is None:
          raise errors.OpPrereqError("if ip=pool, parameter network"
                                     " must be passed too",
                                     errors.ECODE_INVAL)

      elif not netutils.IPAddress.IsValid(ip):
        raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                   errors.ECODE_INVAL)

      nic_ip = ip

    # TODO: check the ip address for uniqueness
    if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
      raise errors.OpPrereqError("Routed nic mode requires an ip address",
                                 errors.ECODE_INVAL)

    # MAC address verification
    mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
    if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
      mac = utils.NormalizeAndValidateMac(mac)

      try:
        # TODO: We need to factor this out
        cfg.ReserveMAC(mac, ec_id)
      except errors.ReservationError:
        raise errors.OpPrereqError("MAC address %s already in use"
                                   " in cluster" % mac,
                                   errors.ECODE_NOTUNIQUE)

    # Build nic parameters
    nicparams = {}
    if nic_mode_req:
      nicparams[constants.NIC_MODE] = nic_mode
    if link:
      nicparams[constants.NIC_LINK] = link

    check_params = cluster.SimpleFillNIC(nicparams)
    objects.NIC.CheckParameterSyntax(check_params)
    net_uuid = cfg.LookupNetwork(net)
    name = nic.get(constants.INIC_NAME, None)
    if name is not None and name.lower() == constants.VALUE_NONE:
      name = None
    nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name,
                          network=net_uuid, nicparams=nicparams)
    nic_obj.uuid = cfg.GenerateUniqueID(ec_id)
    nics.append(nic_obj)

  return nics
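
# Illustrative sketch only (the dicts below are examples, not cluster data):
# a NIC spec such as {"ip": "pool", "network": "net1"} keeps ip="pool" here
# and is only resolved from the network's address pool later, in CheckPrereq,
# once the primary node is known, while {"ip": "auto"} takes the default IP
# derived from the instance name and therefore requires name_check.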


def _CheckForConflictingIp(lu, ip, node_uuid):
  """In case of conflicting IP address raise error.

  @type ip: string
  @param ip: IP address
  @type node_uuid: string
  @param node_uuid: node UUID

  """
  (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node_uuid)
  if conf_net is not None:
    raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
                                " network %s, but the target NIC does not." %
                                (ip, conf_net)),
                               errors.ECODE_STATE)

  return (None, None)


def _ComputeIPolicyInstanceSpecViolation(
  ipolicy, instance_spec, disk_template,
  _compute_fn=ComputeIPolicySpecViolation):
  """Compute if instance specs meet the specs of ipolicy.

  @type ipolicy: dict
  @param ipolicy: The ipolicy to verify against
  @type instance_spec: dict
  @param instance_spec: The instance spec to verify
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{ComputeIPolicySpecViolation}

  """
  mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
  cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
  disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
  disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
  nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
  spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)

  return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
                     disk_sizes, spindle_use, disk_template)


def _CheckOSVariant(os_obj, name):
  """Check whether an OS name conforms to the os variants specification.

  @type os_obj: L{objects.OS}
  @param os_obj: OS object to check
  @type name: string
  @param name: OS name passed by the user, to check for validity

  """
  variant = objects.OS.GetVariant(name)
  if not os_obj.supported_variants:
    if variant:
      raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
                                 " passed)" % (os_obj.name, variant),
                                 errors.ECODE_INVAL)
    return
  if not variant:
    raise errors.OpPrereqError("OS name must include a variant",
                               errors.ECODE_INVAL)

  if variant not in os_obj.supported_variants:
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)
315

    
316

    
317
class LUInstanceCreate(LogicalUnit):
318
  """Create an instance.
319

320
  """
321
  HPATH = "instance-add"
322
  HTYPE = constants.HTYPE_INSTANCE
323
  REQ_BGL = False
324

    
325
  def _CheckDiskTemplateValid(self):
326
    """Checks validity of disk template.
327

328
    """
329
    cluster = self.cfg.GetClusterInfo()
330
    if not self.op.disk_template in cluster.enabled_disk_templates:
331
      raise errors.OpPrereqError("Cannot create an instance with disk template"
332
                                 " '%s', because it is not enabled in the"
333
                                 " cluster. Enabled disk templates are: %s." %
334
                                 (self.op.disk_template,
335
                                  ",".join(cluster.enabled_disk_templates)))
336

    
337
  def _CheckDiskArguments(self):
338
    """Checks validity of disk-related arguments.
339

340
    """
341
    # check that disk's names are unique and valid
342
    utils.ValidateDeviceNames("disk", self.op.disks)
343

    
344
    self._CheckDiskTemplateValid()
345

    
346
    # check disks. parameter names and consistent adopt/no-adopt strategy
347
    has_adopt = has_no_adopt = False
348
    for disk in self.op.disks:
349
      if self.op.disk_template != constants.DT_EXT:
350
        utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
351
      if constants.IDISK_ADOPT in disk:
352
        has_adopt = True
353
      else:
354
        has_no_adopt = True
355
    if has_adopt and has_no_adopt:
356
      raise errors.OpPrereqError("Either all disks are adopted or none is",
357
                                 errors.ECODE_INVAL)
358
    if has_adopt:
359
      if self.op.disk_template not in constants.DTS_MAY_ADOPT:
360
        raise errors.OpPrereqError("Disk adoption is not supported for the"
361
                                   " '%s' disk template" %
362
                                   self.op.disk_template,
363
                                   errors.ECODE_INVAL)
364
      if self.op.iallocator is not None:
365
        raise errors.OpPrereqError("Disk adoption not allowed with an"
366
                                   " iallocator script", errors.ECODE_INVAL)
367
      if self.op.mode == constants.INSTANCE_IMPORT:
368
        raise errors.OpPrereqError("Disk adoption not allowed for"
369
                                   " instance import", errors.ECODE_INVAL)
370
    else:
371
      if self.op.disk_template in constants.DTS_MUST_ADOPT:
372
        raise errors.OpPrereqError("Disk template %s requires disk adoption,"
373
                                   " but no 'adopt' parameter given" %
374
                                   self.op.disk_template,
375
                                   errors.ECODE_INVAL)
376

    
377
    self.adopt_disks = has_adopt
378

    
379
  def CheckArguments(self):
380
    """Check arguments.
381

382
    """
383
    # do not require name_check to ease forward/backward compatibility
384
    # for tools
385
    if self.op.no_install and self.op.start:
386
      self.LogInfo("No-installation mode selected, disabling startup")
387
      self.op.start = False
388
    # validate/normalize the instance name
389
    self.op.instance_name = \
390
      netutils.Hostname.GetNormalizedName(self.op.instance_name)
391

    
392
    if self.op.ip_check and not self.op.name_check:
393
      # TODO: make the ip check more flexible and not depend on the name check
394
      raise errors.OpPrereqError("Cannot do IP address check without a name"
395
                                 " check", errors.ECODE_INVAL)
396

    
397
    # check nics' parameter names
398
    for nic in self.op.nics:
399
      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
400
    # check that NIC's parameters names are unique and valid
401
    utils.ValidateDeviceNames("NIC", self.op.nics)
402

    
403
    self._CheckDiskArguments()
404

    
405
    # instance name verification
406
    if self.op.name_check:
407
      self.hostname = _CheckHostnameSane(self, self.op.instance_name)
408
      self.op.instance_name = self.hostname.name
409
      # used in CheckPrereq for ip ping check
410
      self.check_ip = self.hostname.ip
411
    else:
412
      self.check_ip = None
413

    
414
    # file storage checks
415
    if (self.op.file_driver and
416
        not self.op.file_driver in constants.FILE_DRIVER):
417
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
418
                                 self.op.file_driver, errors.ECODE_INVAL)
419

    
420
    if self.op.disk_template == constants.DT_FILE:
421
      opcodes.RequireFileStorage()
422
    elif self.op.disk_template == constants.DT_SHARED_FILE:
423
      opcodes.RequireSharedFileStorage()
424

    
425
    ### Node/iallocator related checks
426
    CheckIAllocatorOrNode(self, "iallocator", "pnode")
427

    
428
    if self.op.pnode is not None:
429
      if self.op.disk_template in constants.DTS_INT_MIRROR:
430
        if self.op.snode is None:
431
          raise errors.OpPrereqError("The networked disk templates need"
432
                                     " a mirror node", errors.ECODE_INVAL)
433
      elif self.op.snode:
434
        self.LogWarning("Secondary node will be ignored on non-mirrored disk"
435
                        " template")
436
        self.op.snode = None
437

    
438
    _CheckOpportunisticLocking(self.op)
439

    
440
    self._cds = GetClusterDomainSecret()
441

    
442
    if self.op.mode == constants.INSTANCE_IMPORT:
443
      # On import force_variant must be True, because if we forced it at
444
      # initial install, our only chance when importing it back is that it
445
      # works again!
446
      self.op.force_variant = True
447

    
448
      if self.op.no_install:
449
        self.LogInfo("No-installation mode has no effect during import")
450

    
451
    elif self.op.mode == constants.INSTANCE_CREATE:
452
      if self.op.os_type is None:
453
        raise errors.OpPrereqError("No guest OS specified",
454
                                   errors.ECODE_INVAL)
455
      if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
456
        raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
457
                                   " installation" % self.op.os_type,
458
                                   errors.ECODE_STATE)
459
      if self.op.disk_template is None:
460
        raise errors.OpPrereqError("No disk template specified",
461
                                   errors.ECODE_INVAL)
462

    
463
    elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
464
      # Check handshake to ensure both clusters have the same domain secret
465
      src_handshake = self.op.source_handshake
466
      if not src_handshake:
467
        raise errors.OpPrereqError("Missing source handshake",
468
                                   errors.ECODE_INVAL)
469

    
470
      errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
471
                                                           src_handshake)
472
      if errmsg:
473
        raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
474
                                   errors.ECODE_INVAL)
475

    
476
      # Load and check source CA
477
      self.source_x509_ca_pem = self.op.source_x509_ca
478
      if not self.source_x509_ca_pem:
479
        raise errors.OpPrereqError("Missing source X509 CA",
480
                                   errors.ECODE_INVAL)
481

    
482
      try:
483
        (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
484
                                                    self._cds)
485
      except OpenSSL.crypto.Error, err:
486
        raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
487
                                   (err, ), errors.ECODE_INVAL)
488

    
489
      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
490
      if errcode is not None:
491
        raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
492
                                   errors.ECODE_INVAL)
493

    
494
      self.source_x509_ca = cert
495

    
496
      src_instance_name = self.op.source_instance_name
497
      if not src_instance_name:
498
        raise errors.OpPrereqError("Missing source instance name",
499
                                   errors.ECODE_INVAL)
500

    
501
      self.source_instance_name = \
502
        netutils.GetHostname(name=src_instance_name).name
503

    
504
    else:
505
      raise errors.OpPrereqError("Invalid instance creation mode %r" %
506
                                 self.op.mode, errors.ECODE_INVAL)
507

    
508
  def ExpandNames(self):
509
    """ExpandNames for CreateInstance.
510

511
    Figure out the right locks for instance creation.
512

513
    """
514
    self.needed_locks = {}
515

    
516
    # this is just a preventive check, but someone might still add this
517
    # instance in the meantime, and creation will fail at lock-add time
518
    if self.op.instance_name in\
519
      [inst.name for inst in self.cfg.GetAllInstancesInfo().values()]:
520
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
521
                                 self.op.instance_name, errors.ECODE_EXISTS)
522

    
523
    self.add_locks[locking.LEVEL_INSTANCE] = self.op.instance_name
524

    
525
    if self.op.iallocator:
526
      # TODO: Find a solution to not lock all nodes in the cluster, e.g. by
527
      # specifying a group on instance creation and then selecting nodes from
528
      # that group
529
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
530
      self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
531

    
532
      if self.op.opportunistic_locking:
533
        self.opportunistic_locks[locking.LEVEL_NODE] = True
534
        self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
535
    else:
536
      (self.op.pnode_uuid, self.op.pnode) = \
537
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
538
      nodelist = [self.op.pnode_uuid]
539
      if self.op.snode is not None:
540
        (self.op.snode_uuid, self.op.snode) = \
541
          ExpandNodeUuidAndName(self.cfg, self.op.snode_uuid, self.op.snode)
542
        nodelist.append(self.op.snode_uuid)
543
      self.needed_locks[locking.LEVEL_NODE] = nodelist
544

    
545
    # in case of import lock the source node too
546
    if self.op.mode == constants.INSTANCE_IMPORT:
547
      src_node = self.op.src_node
548
      src_path = self.op.src_path
549

    
550
      if src_path is None:
551
        self.op.src_path = src_path = self.op.instance_name
552

    
553
      if src_node is None:
554
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
555
        self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
556
        self.op.src_node = None
557
        if os.path.isabs(src_path):
558
          raise errors.OpPrereqError("Importing an instance from a path"
559
                                     " requires a source node option",
560
                                     errors.ECODE_INVAL)
561
      else:
562
        (self.op.src_node_uuid, self.op.src_node) = (_, src_node) = \
563
          ExpandNodeUuidAndName(self.cfg, self.op.src_node_uuid, src_node)
564
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
565
          self.needed_locks[locking.LEVEL_NODE].append(self.op.src_node_uuid)
566
        if not os.path.isabs(src_path):
567
          self.op.src_path = src_path = \
568
            utils.PathJoin(pathutils.EXPORT_DIR, src_path)
569

    
570
    self.needed_locks[locking.LEVEL_NODE_RES] = \
571
      CopyLockList(self.needed_locks[locking.LEVEL_NODE])
572

    
573
  def _RunAllocator(self):
574
    """Run the allocator based on input opcode.
575

576
    """
577
    if self.op.opportunistic_locking:
578
      # Only consider nodes for which a lock is held
579
      node_name_whitelist = self.cfg.GetNodeNames(
580
        self.owned_locks(locking.LEVEL_NODE))
581
    else:
582
      node_name_whitelist = None
583

    
584
    #TODO Export network to iallocator so that it chooses a pnode
585
    #     in a nodegroup that has the desired network connected to
586
    req = _CreateInstanceAllocRequest(self.op, self.disks,
587
                                      self.nics, self.be_full,
588
                                      node_name_whitelist)
589
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)
590

    
591
    ial.Run(self.op.iallocator)
592

    
593
    if not ial.success:
594
      # When opportunistic locks are used only a temporary failure is generated
595
      if self.op.opportunistic_locking:
596
        ecode = errors.ECODE_TEMP_NORES
597
      else:
598
        ecode = errors.ECODE_NORES
599

    
600
      raise errors.OpPrereqError("Can't compute nodes using"
601
                                 " iallocator '%s': %s" %
602
                                 (self.op.iallocator, ial.info),
603
                                 ecode)
604

    
605
    (self.op.pnode_uuid, self.op.pnode) = \
606
      ExpandNodeUuidAndName(self.cfg, None, ial.result[0])
607
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
608
                 self.op.instance_name, self.op.iallocator,
609
                 utils.CommaJoin(ial.result))
610

    
611
    assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator"
612

    
613
    if req.RequiredNodes() == 2:
614
      (self.op.snode_uuid, self.op.snode) = \
615
        ExpandNodeUuidAndName(self.cfg, None, ial.result[1])
616

    
617
  def BuildHooksEnv(self):
618
    """Build hooks env.
619

620
    This runs on master, primary and secondary nodes of the instance.
621

622
    """
623
    env = {
624
      "ADD_MODE": self.op.mode,
625
      }
626
    if self.op.mode == constants.INSTANCE_IMPORT:
627
      env["SRC_NODE"] = self.op.src_node
628
      env["SRC_PATH"] = self.op.src_path
629
      env["SRC_IMAGES"] = self.src_images
630

    
631
    env.update(BuildInstanceHookEnv(
632
      name=self.op.instance_name,
633
      primary_node_name=self.op.pnode,
634
      secondary_node_names=self.cfg.GetNodeNames(self.secondaries),
635
      status=self.op.start,
636
      os_type=self.op.os_type,
637
      minmem=self.be_full[constants.BE_MINMEM],
638
      maxmem=self.be_full[constants.BE_MAXMEM],
639
      vcpus=self.be_full[constants.BE_VCPUS],
640
      nics=NICListToTuple(self, self.nics),
641
      disk_template=self.op.disk_template,
642
      disks=[(d[constants.IDISK_NAME], d.get("uuid", ""),
643
              d[constants.IDISK_SIZE], d[constants.IDISK_MODE])
644
             for d in self.disks],
645
      bep=self.be_full,
646
      hvp=self.hv_full,
647
      hypervisor_name=self.op.hypervisor,
648
      tags=self.op.tags,
649
      ))
650

    
651
    return env
652

    
653
  def BuildHooksNodes(self):
654
    """Build hooks nodes.
655

656
    """
657
    nl = [self.cfg.GetMasterNode(), self.op.pnode_uuid] + self.secondaries
658
    return nl, nl
659

    
660
  def _ReadExportInfo(self):
661
    """Reads the export information from disk.
662

663
    It will override the opcode source node and path with the actual
664
    information, if these two were not specified before.
665

666
    @return: the export information
667

668
    """
669
    assert self.op.mode == constants.INSTANCE_IMPORT
670

    
671
    if self.op.src_node_uuid is None:
672
      locked_nodes = self.owned_locks(locking.LEVEL_NODE)
673
      exp_list = self.rpc.call_export_list(locked_nodes)
674
      found = False
675
      for node in exp_list:
676
        if exp_list[node].fail_msg:
677
          continue
678
        if self.op.src_path in exp_list[node].payload:
679
          found = True
680
          self.op.src_node = node
681
          self.op.src_node_uuid = self.cfg.GetNodeInfoByName(node).uuid
682
          self.op.src_path = utils.PathJoin(pathutils.EXPORT_DIR,
683
                                            self.op.src_path)
684
          break
685
      if not found:
686
        raise errors.OpPrereqError("No export found for relative path %s" %
687
                                   self.op.src_path, errors.ECODE_INVAL)
688

    
689
    CheckNodeOnline(self, self.op.src_node_uuid)
690
    result = self.rpc.call_export_info(self.op.src_node_uuid, self.op.src_path)
691
    result.Raise("No export or invalid export found in dir %s" %
692
                 self.op.src_path)
693

    
694
    export_info = objects.SerializableConfigParser.Loads(str(result.payload))
695
    if not export_info.has_section(constants.INISECT_EXP):
696
      raise errors.ProgrammerError("Corrupted export config",
697
                                   errors.ECODE_ENVIRON)
698

    
699
    ei_version = export_info.get(constants.INISECT_EXP, "version")
700
    if int(ei_version) != constants.EXPORT_VERSION:
701
      raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
702
                                 (ei_version, constants.EXPORT_VERSION),
703
                                 errors.ECODE_ENVIRON)
704
    return export_info
705

    
706
  def _ReadExportParams(self, einfo):
707
    """Use export parameters as defaults.
708

709
    In case the opcode doesn't specify (as in override) some instance
710
    parameters, then try to use them from the export information, if
711
    that declares them.
712

713
    """
714
    self.op.os_type = einfo.get(constants.INISECT_EXP, "os")
715

    
716
    if self.op.disk_template is None:
717
      if einfo.has_option(constants.INISECT_INS, "disk_template"):
718
        self.op.disk_template = einfo.get(constants.INISECT_INS,
719
                                          "disk_template")
720
        if self.op.disk_template not in constants.DISK_TEMPLATES:
721
          raise errors.OpPrereqError("Disk template specified in configuration"
722
                                     " file is not one of the allowed values:"
723
                                     " %s" %
724
                                     " ".join(constants.DISK_TEMPLATES),
725
                                     errors.ECODE_INVAL)
726
      else:
727
        raise errors.OpPrereqError("No disk template specified and the export"
728
                                   " is missing the disk_template information",
729
                                   errors.ECODE_INVAL)
730

    
731
    if not self.op.disks:
732
      disks = []
733
      # TODO: import the disk iv_name too
734
      for idx in range(constants.MAX_DISKS):
735
        if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
736
          disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
737
          disks.append({constants.IDISK_SIZE: disk_sz})
738
      self.op.disks = disks
739
      if not disks and self.op.disk_template != constants.DT_DISKLESS:
740
        raise errors.OpPrereqError("No disk info specified and the export"
741
                                   " is missing the disk information",
742
                                   errors.ECODE_INVAL)
743

    
744
    if not self.op.nics:
745
      nics = []
746
      for idx in range(constants.MAX_NICS):
747
        if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
748
          ndict = {}
749
          for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
750
            v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
751
            ndict[name] = v
752
          nics.append(ndict)
753
        else:
754
          break
755
      self.op.nics = nics
756

    
757
    if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
758
      self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()
759

    
760
    if (self.op.hypervisor is None and
761
        einfo.has_option(constants.INISECT_INS, "hypervisor")):
762
      self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")
763

    
764
    if einfo.has_section(constants.INISECT_HYP):
765
      # use the export parameters but do not override the ones
766
      # specified by the user
767
      for name, value in einfo.items(constants.INISECT_HYP):
768
        if name not in self.op.hvparams:
769
          self.op.hvparams[name] = value
770

    
771
    if einfo.has_section(constants.INISECT_BEP):
772
      # use the parameters, without overriding
773
      for name, value in einfo.items(constants.INISECT_BEP):
774
        if name not in self.op.beparams:
775
          self.op.beparams[name] = value
776
        # Compatibility for the old "memory" be param
777
        if name == constants.BE_MEMORY:
778
          if constants.BE_MAXMEM not in self.op.beparams:
779
            self.op.beparams[constants.BE_MAXMEM] = value
780
          if constants.BE_MINMEM not in self.op.beparams:
781
            self.op.beparams[constants.BE_MINMEM] = value
782
    else:
783
      # try to read the parameters old style, from the main section
784
      for name in constants.BES_PARAMETERS:
785
        if (name not in self.op.beparams and
786
            einfo.has_option(constants.INISECT_INS, name)):
787
          self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)
788

    
789
    if einfo.has_section(constants.INISECT_OSP):
790
      # use the parameters, without overriding
791
      for name, value in einfo.items(constants.INISECT_OSP):
792
        if name not in self.op.osparams:
793
          self.op.osparams[name] = value
794

    
795
  def _RevertToDefaults(self, cluster):
796
    """Revert the instance parameters to the default values.
797

798
    """
799
    # hvparams
800
    hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
801
    for name in self.op.hvparams.keys():
802
      if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
803
        del self.op.hvparams[name]
804
    # beparams
805
    be_defs = cluster.SimpleFillBE({})
806
    for name in self.op.beparams.keys():
807
      if name in be_defs and be_defs[name] == self.op.beparams[name]:
808
        del self.op.beparams[name]
809
    # nic params
810
    nic_defs = cluster.SimpleFillNIC({})
811
    for nic in self.op.nics:
812
      for name in constants.NICS_PARAMETERS:
813
        if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
814
          del nic[name]
815
    # osparams
816
    os_defs = cluster.SimpleFillOS(self.op.os_type, {})
817
    for name in self.op.osparams.keys():
818
      if name in os_defs and os_defs[name] == self.op.osparams[name]:
819
        del self.op.osparams[name]
820

    
821
  def _CalculateFileStorageDir(self):
822
    """Calculate final instance file storage dir.
823

824
    """
825
    # file storage dir calculation/check
826
    self.instance_file_storage_dir = None
827
    if self.op.disk_template in constants.DTS_FILEBASED:
828
      # build the full file storage dir path
829
      joinargs = []
830

    
831
      if self.op.disk_template == constants.DT_SHARED_FILE:
832
        get_fsd_fn = self.cfg.GetSharedFileStorageDir
833
      else:
834
        get_fsd_fn = self.cfg.GetFileStorageDir
835

    
836
      cfg_storagedir = get_fsd_fn()
837
      if not cfg_storagedir:
838
        raise errors.OpPrereqError("Cluster file storage dir not defined",
839
                                   errors.ECODE_STATE)
840
      joinargs.append(cfg_storagedir)
841

    
842
      if self.op.file_storage_dir is not None:
843
        joinargs.append(self.op.file_storage_dir)
844

    
845
      joinargs.append(self.op.instance_name)
846

    
847
      # pylint: disable=W0142
848
      self.instance_file_storage_dir = utils.PathJoin(*joinargs)
849

    
850
  def CheckPrereq(self): # pylint: disable=R0914
851
    """Check prerequisites.
852

853
    """
854
    self._CalculateFileStorageDir()
855

    
856
    if self.op.mode == constants.INSTANCE_IMPORT:
857
      export_info = self._ReadExportInfo()
858
      self._ReadExportParams(export_info)
859
      self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
860
    else:
861
      self._old_instance_name = None
862

    
863
    if (not self.cfg.GetVGName() and
864
        self.op.disk_template not in constants.DTS_NOT_LVM):
865
      raise errors.OpPrereqError("Cluster does not support lvm-based"
866
                                 " instances", errors.ECODE_STATE)
867

    
868
    if (self.op.hypervisor is None or
869
        self.op.hypervisor == constants.VALUE_AUTO):
870
      self.op.hypervisor = self.cfg.GetHypervisorType()
871

    
872
    cluster = self.cfg.GetClusterInfo()
873
    enabled_hvs = cluster.enabled_hypervisors
874
    if self.op.hypervisor not in enabled_hvs:
875
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
876
                                 " cluster (%s)" %
877
                                 (self.op.hypervisor, ",".join(enabled_hvs)),
878
                                 errors.ECODE_STATE)
879

    
880
    # Check tag validity
881
    for tag in self.op.tags:
882
      objects.TaggableObject.ValidateTag(tag)
883

    
884
    # check hypervisor parameter syntax (locally)
885
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
886
    filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
887
                                      self.op.hvparams)
888
    hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
889
    hv_type.CheckParameterSyntax(filled_hvp)
890
    self.hv_full = filled_hvp
891
    # check that we don't specify global parameters on an instance
892
    CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
893
                         "instance", "cluster")
894

    
895
    # fill and remember the beparams dict
896
    self.be_full = _ComputeFullBeParams(self.op, cluster)
897

    
898
    # build os parameters
899
    self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)
900

    
901
    # now that hvp/bep are in final format, let's reset to defaults,
902
    # if told to do so
903
    if self.op.identify_defaults:
904
      self._RevertToDefaults(cluster)
905

    
906
    # NIC buildup
907
    self.nics = _ComputeNics(self.op, cluster, self.check_ip, self.cfg,
908
                             self.proc.GetECId())
909

    
910
    # disk checks/pre-build
911
    default_vg = self.cfg.GetVGName()
912
    self.disks = ComputeDisks(self.op, default_vg)
913

    
914
    if self.op.mode == constants.INSTANCE_IMPORT:
915
      disk_images = []
916
      for idx in range(len(self.disks)):
917
        option = "disk%d_dump" % idx
918
        if export_info.has_option(constants.INISECT_INS, option):
919
          # FIXME: are the old os-es, disk sizes, etc. useful?
920
          export_name = export_info.get(constants.INISECT_INS, option)
921
          image = utils.PathJoin(self.op.src_path, export_name)
922
          disk_images.append(image)
923
        else:
924
          disk_images.append(False)
925

    
926
      self.src_images = disk_images
927

    
928
      if self.op.instance_name == self._old_instance_name:
929
        for idx, nic in enumerate(self.nics):
930
          if nic.mac == constants.VALUE_AUTO:
931
            nic_mac_ini = "nic%d_mac" % idx
932
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)
933

    
934
    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT
935

    
936
    # ip ping checks (we use the same ip that was resolved in ExpandNames)
937
    if self.op.ip_check:
938
      if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
939
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
940
                                   (self.check_ip, self.op.instance_name),
941
                                   errors.ECODE_NOTUNIQUE)
942

    
943
    #### mac address generation
944
    # By generating here the mac address both the allocator and the hooks get
945
    # the real final mac address rather than the 'auto' or 'generate' value.
946
    # There is a race condition between the generation and the instance object
947
    # creation, which means that we know the mac is valid now, but we're not
948
    # sure it will be when we actually add the instance. If things go bad
949
    # adding the instance will abort because of a duplicate mac, and the
950
    # creation job will fail.
951
    for nic in self.nics:
952
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
953
        nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId())
954

    
955
    #### allocator run
956

    
957
    if self.op.iallocator is not None:
958
      self._RunAllocator()
959

    
960
    # Release all unneeded node locks
961
    keep_locks = filter(None, [self.op.pnode_uuid, self.op.snode_uuid,
962
                               self.op.src_node_uuid])
963
    ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
964
    ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
965
    ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)
966

    
967
    assert (self.owned_locks(locking.LEVEL_NODE) ==
968
            self.owned_locks(locking.LEVEL_NODE_RES)), \
969
      "Node locks differ from node resource locks"
970

    
971
    #### node related checks
972

    
973
    # check primary node
974
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode_uuid)
975
    assert self.pnode is not None, \
976
      "Cannot retrieve locked node %s" % self.op.pnode_uuid
977
    if pnode.offline:
978
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
979
                                 pnode.name, errors.ECODE_STATE)
980
    if pnode.drained:
981
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
982
                                 pnode.name, errors.ECODE_STATE)
983
    if not pnode.vm_capable:
984
      raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
985
                                 " '%s'" % pnode.name, errors.ECODE_STATE)
986

    
987
    self.secondaries = []
988

    
989
    # Fill in any IPs from IP pools. This must happen here, because we need to
990
    # know the nic's primary node, as specified by the iallocator
991
    for idx, nic in enumerate(self.nics):
992
      net_uuid = nic.network
993
      if net_uuid is not None:
994
        nobj = self.cfg.GetNetwork(net_uuid)
995
        netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.uuid)
996
        if netparams is None:
997
          raise errors.OpPrereqError("No netparams found for network"
998
                                     " %s. Propably not connected to"
999
                                     " node's %s nodegroup" %
1000
                                     (nobj.name, self.pnode.name),
1001
                                     errors.ECODE_INVAL)
1002
        self.LogInfo("NIC/%d inherits netparams %s" %
1003
                     (idx, netparams.values()))
1004
        nic.nicparams = dict(netparams)
1005
        if nic.ip is not None:
1006
          if nic.ip.lower() == constants.NIC_IP_POOL:
1007
            try:
1008
              nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId())
1009
            except errors.ReservationError:
1010
              raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
1011
                                         " from the address pool" % idx,
1012
                                         errors.ECODE_STATE)
1013
            self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name)
1014
          else:
1015
            try:
1016
              self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId())
1017
            except errors.ReservationError:
1018
              raise errors.OpPrereqError("IP address %s already in use"
1019
                                         " or does not belong to network %s" %
1020
                                         (nic.ip, nobj.name),
1021
                                         errors.ECODE_NOTUNIQUE)
1022

    
1023
      # net is None, ip None or given
1024
      elif self.op.conflicts_check:
1025
        _CheckForConflictingIp(self, nic.ip, self.pnode.uuid)
1026

    
1027
    # mirror node verification
1028
    if self.op.disk_template in constants.DTS_INT_MIRROR:
1029
      if self.op.snode_uuid == pnode.uuid:
1030
        raise errors.OpPrereqError("The secondary node cannot be the"
1031
                                   " primary node", errors.ECODE_INVAL)
1032
      CheckNodeOnline(self, self.op.snode_uuid)
1033
      CheckNodeNotDrained(self, self.op.snode_uuid)
1034
      CheckNodeVmCapable(self, self.op.snode_uuid)
1035
      self.secondaries.append(self.op.snode_uuid)
1036

    
1037
      snode = self.cfg.GetNodeInfo(self.op.snode_uuid)
1038
      if pnode.group != snode.group:
1039
        self.LogWarning("The primary and secondary nodes are in two"
1040
                        " different node groups; the disk parameters"
1041
                        " from the first disk's node group will be"
1042
                        " used")
1043

    
1044
    nodes = [pnode]
1045
    if self.op.disk_template in constants.DTS_INT_MIRROR:
1046
      nodes.append(snode)
1047
    has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
1048
    excl_stor = compat.any(map(has_es, nodes))
1049
    if excl_stor and not self.op.disk_template in constants.DTS_EXCL_STORAGE:
1050
      raise errors.OpPrereqError("Disk template %s not supported with"
1051
                                 " exclusive storage" % self.op.disk_template,
1052
                                 errors.ECODE_STATE)
1053
    for disk in self.disks:
1054
      CheckSpindlesExclusiveStorage(disk, excl_stor, True)
1055

    
1056
    node_uuids = [pnode.uuid] + self.secondaries
1057

    
1058
    if not self.adopt_disks:
1059
      if self.op.disk_template == constants.DT_RBD:
1060
        # _CheckRADOSFreeSpace() is just a placeholder.
1061
        # Any function that checks prerequisites can be placed here.
1062
        # Check if there is enough space on the RADOS cluster.
1063
        CheckRADOSFreeSpace()
1064
      elif self.op.disk_template == constants.DT_EXT:
1065
        # FIXME: Function that checks prereqs if needed
1066
        pass
1067
      elif self.op.disk_template in utils.GetLvmDiskTemplates():
1068
        # Check lv size requirements, if not adopting
1069
        req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
1070
        CheckNodesFreeDiskPerVG(self, node_uuids, req_sizes)
1071
      else:
1072
        # FIXME: add checks for other, non-adopting, non-lvm disk templates
1073
        pass
1074

    
1075
    elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
1076
      all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
1077
                                disk[constants.IDISK_ADOPT])
1078
                     for disk in self.disks])
1079
      if len(all_lvs) != len(self.disks):
1080
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
1081
                                   errors.ECODE_INVAL)
1082
      for lv_name in all_lvs:
1083
        try:
1084
          # FIXME: lv_name here is "vg/lv" need to ensure that other calls
1085
          # to ReserveLV uses the same syntax
1086
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
1087
        except errors.ReservationError:
1088
          raise errors.OpPrereqError("LV named %s used by another instance" %
1089
                                     lv_name, errors.ECODE_NOTUNIQUE)
1090

    
1091
      vg_names = self.rpc.call_vg_list([pnode.uuid])[pnode.uuid]
1092
      vg_names.Raise("Cannot get VG information from node %s" % pnode.name)
1093

    
1094
      node_lvs = self.rpc.call_lv_list([pnode.uuid],
1095
                                       vg_names.payload.keys())[pnode.uuid]
1096
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
1097
      node_lvs = node_lvs.payload
1098

    
1099
      delta = all_lvs.difference(node_lvs.keys())
1100
      if delta:
1101
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
1102
                                   utils.CommaJoin(delta),
1103
                                   errors.ECODE_INVAL)
1104
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
1105
      if online_lvs:
1106
        raise errors.OpPrereqError("Online logical volumes found, cannot"
1107
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
1108
                                   errors.ECODE_STATE)
1109
      # update the size of disk based on what is found
1110
      for dsk in self.disks:
1111
        dsk[constants.IDISK_SIZE] = \
1112
          int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
1113
                                        dsk[constants.IDISK_ADOPT])][0]))
1114

    
1115
    elif self.op.disk_template == constants.DT_BLOCK:
1116
      # Normalize and de-duplicate device paths
1117
      all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
1118
                       for disk in self.disks])
1119
      if len(all_disks) != len(self.disks):
1120
        raise errors.OpPrereqError("Duplicate disk names given for adoption",
1121
                                   errors.ECODE_INVAL)
1122
      baddisks = [d for d in all_disks
1123
                  if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
1124
      if baddisks:
1125
        raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
1126
                                   " cannot be adopted" %
1127
                                   (utils.CommaJoin(baddisks),
1128
                                    constants.ADOPTABLE_BLOCKDEV_ROOT),
1129
                                   errors.ECODE_INVAL)
1130

    
1131
      node_disks = self.rpc.call_bdev_sizes([pnode.uuid],
1132
                                            list(all_disks))[pnode.uuid]
1133
      node_disks.Raise("Cannot get block device information from node %s" %
1134
                       pnode.name)
1135
      node_disks = node_disks.payload
1136
      delta = all_disks.difference(node_disks.keys())
1137
      if delta:
1138
        raise errors.OpPrereqError("Missing block device(s): %s" %
1139
                                   utils.CommaJoin(delta),
1140
                                   errors.ECODE_INVAL)
1141
      for dsk in self.disks:
1142
        dsk[constants.IDISK_SIZE] = \
1143
          int(float(node_disks[dsk[constants.IDISK_ADOPT]]))
1144

    
1145
    # Verify instance specs
1146
    spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
1147
    ispec = {
1148
      constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
1149
      constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
1150
      constants.ISPEC_DISK_COUNT: len(self.disks),
1151
      constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE]
1152
                                  for disk in self.disks],
1153
      constants.ISPEC_NIC_COUNT: len(self.nics),
1154
      constants.ISPEC_SPINDLE_USE: spindle_use,
1155
      }
1156

    
1157
    group_info = self.cfg.GetNodeGroup(pnode.group)
1158
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
1159
    res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
1160
                                               self.op.disk_template)
1161
    if not self.op.ignore_ipolicy and res:
1162
      msg = ("Instance allocation to group %s (%s) violates policy: %s" %
1163
             (pnode.group, group_info.name, utils.CommaJoin(res)))
1164
      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
1165

    
1166
    CheckHVParams(self, node_uuids, self.op.hypervisor, self.op.hvparams)
1167

    
1168
    CheckNodeHasOS(self, pnode.uuid, self.op.os_type, self.op.force_variant)
1169
    # check OS parameters (remotely)
1170
    CheckOSParams(self, True, node_uuids, self.op.os_type, self.os_full)
1171

    
1172
    CheckNicsBridgesExist(self, self.nics, self.pnode.uuid)
1173

    
1174
    #TODO: _CheckExtParams (remotely)
1175
    # Check parameters for extstorage
1176

    
1177
    # memory check on primary node
1178
    #TODO(dynmem): use MINMEM for checking
1179
    if self.op.start:
1180
      hvfull = objects.FillDict(cluster.hvparams.get(self.op.hypervisor, {}),
1181
                                self.op.hvparams)
1182
      CheckNodeFreeMemory(self, self.pnode.uuid,
1183
                          "creating instance %s" % self.op.instance_name,
1184
                          self.be_full[constants.BE_MAXMEM],
1185
                          self.op.hypervisor, hvfull)
1186

    
1187
    self.dry_run_result = list(node_uuids)
1188

    
1189
  def Exec(self, feedback_fn):
1190
    """Create and add the instance to the cluster.
1191

1192
    """
1193
    assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
1194
                self.owned_locks(locking.LEVEL_NODE)), \
1195
      "Node locks differ from node resource locks"
1196
    assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)
1197

    
1198
    ht_kind = self.op.hypervisor
1199
    if ht_kind in constants.HTS_REQ_PORT:
1200
      network_port = self.cfg.AllocatePort()
1201
    else:
1202
      network_port = None
1203

    
1204
    instance_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
1205

    
1206
    # This is ugly but we got a chicken-egg problem here
1207
    # We can only take the group disk parameters, as the instance
1208
    # has no disks yet (we are generating them right here).
1209
    nodegroup = self.cfg.GetNodeGroup(self.pnode.group)
1210
    disks = GenerateDiskTemplate(self,
1211
                                 self.op.disk_template,
1212
                                 instance_uuid, self.pnode.uuid,
1213
                                 self.secondaries,
1214
                                 self.disks,
1215
                                 self.instance_file_storage_dir,
1216
                                 self.op.file_driver,
1217
                                 0,
1218
                                 feedback_fn,
1219
                                 self.cfg.GetGroupDiskParams(nodegroup))
1220

    
1221
    iobj = objects.Instance(name=self.op.instance_name,
1222
                            uuid=instance_uuid,
1223
                            os=self.op.os_type,
1224
                            primary_node=self.pnode.uuid,
1225
                            nics=self.nics, disks=disks,
1226
                            disk_template=self.op.disk_template,
1227
                            disks_active=False,
1228
                            admin_state=constants.ADMINST_DOWN,
1229
                            network_port=network_port,
1230
                            beparams=self.op.beparams,
1231
                            hvparams=self.op.hvparams,
1232
                            hypervisor=self.op.hypervisor,
1233
                            osparams=self.op.osparams,
1234
                            )
1235

    
1236
    if self.op.tags:
1237
      for tag in self.op.tags:
1238
        iobj.AddTag(tag)
1239

    
1240
    if self.adopt_disks:
1241
      if self.op.disk_template == constants.DT_PLAIN:
1242
        # rename LVs to the newly-generated names; we need to construct
1243
        # 'fake' LV disks with the old data, plus the new unique_id
1244
        tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
1245
        rename_to = []
1246
        for t_dsk, a_dsk in zip(tmp_disks, self.disks):
1247
          rename_to.append(t_dsk.logical_id)
1248
          t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
1249
          self.cfg.SetDiskID(t_dsk, self.pnode.uuid)
1250
        result = self.rpc.call_blockdev_rename(self.pnode.uuid,
1251
                                               zip(tmp_disks, rename_to))
1252
        result.Raise("Failed to rename adopted LVs")
1253
    else:
1254
      feedback_fn("* creating instance disks...")
1255
      try:
1256
        CreateDisks(self, iobj)
1257
      except errors.OpExecError:
1258
        self.LogWarning("Device creation failed")
1259
        self.cfg.ReleaseDRBDMinors(self.op.instance_name)
1260
        raise
1261

    
1262
    feedback_fn("adding instance %s to cluster config" % self.op.instance_name)
1263

    
1264
    self.cfg.AddInstance(iobj, self.proc.GetECId())
1265

    
1266
    # Declare that we don't want to remove the instance lock anymore, as we've
1267
    # added the instance to the config
1268
    del self.remove_locks[locking.LEVEL_INSTANCE]
1269

    
1270
    if self.op.mode == constants.INSTANCE_IMPORT:
1271
      # Release unused nodes
1272
      ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node_uuid])
1273
    else:
1274
      # Release all nodes
1275
      ReleaseLocks(self, locking.LEVEL_NODE)
1276

    
1277
    disk_abort = False
1278
    if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
1279
      feedback_fn("* wiping instance disks...")
1280
      try:
1281
        WipeDisks(self, iobj)
1282
      except errors.OpExecError, err:
1283
        logging.exception("Wiping disks failed")
1284
        self.LogWarning("Wiping instance disks failed (%s)", err)
1285
        disk_abort = True
1286

    
1287
    if disk_abort:
1288
      # Something is already wrong with the disks, don't do anything else
1289
      pass
1290
    elif self.op.wait_for_sync:
1291
      disk_abort = not WaitForSync(self, iobj)
1292
    elif iobj.disk_template in constants.DTS_INT_MIRROR:
1293
      # make sure the disks are not degraded (still sync-ing is ok)
1294
      feedback_fn("* checking mirrors status")
1295
      disk_abort = not WaitForSync(self, iobj, oneshot=True)
1296
    else:
1297
      disk_abort = False
1298

    
1299
    if disk_abort:
1300
      RemoveDisks(self, iobj)
1301
      self.cfg.RemoveInstance(iobj.uuid)
1302
      # Make sure the instance lock gets removed
1303
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
1304
      raise errors.OpExecError("There are some degraded disks for"
1305
                               " this instance")
1306

    
1307
    # instance disks are now active
1308
    iobj.disks_active = True
1309

    
1310
    # Release all node resource locks
1311
    ReleaseLocks(self, locking.LEVEL_NODE_RES)
1312

    
1313
    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
1314
      # we need to set the disks ID to the primary node, since the
      # preceding code might or might not have done it, depending on
      # disk template and other options
1317
      for disk in iobj.disks:
1318
        self.cfg.SetDiskID(disk, self.pnode.uuid)
1319
      if self.op.mode == constants.INSTANCE_CREATE:
1320
        if not self.op.no_install:
1321
          pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
1322
                        not self.op.wait_for_sync)
1323
          if pause_sync:
1324
            feedback_fn("* pausing disk sync to install instance OS")
1325
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
1326
                                                              (iobj.disks,
1327
                                                               iobj), True)
1328
            for idx, success in enumerate(result.payload):
1329
              if not success:
1330
                logging.warn("pause-sync of instance %s for disk %d failed",
1331
                             self.op.instance_name, idx)
1332

    
1333
          feedback_fn("* running the instance OS create scripts...")
1334
          # FIXME: pass debug option from opcode to backend
1335
          os_add_result = \
1336
            self.rpc.call_instance_os_add(self.pnode.uuid, (iobj, None), False,
1337
                                          self.op.debug_level)
1338
          if pause_sync:
1339
            feedback_fn("* resuming disk sync")
1340
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
1341
                                                              (iobj.disks,
1342
                                                               iobj), False)
1343
            for idx, success in enumerate(result.payload):
1344
              if not success:
1345
                logging.warn("resume-sync of instance %s for disk %d failed",
1346
                             self.op.instance_name, idx)
1347

    
1348
          os_add_result.Raise("Could not add os for instance %s"
1349
                              " on node %s" % (self.op.instance_name,
1350
                                               self.pnode.name))
1351

    
1352
      else:
1353
        if self.op.mode == constants.INSTANCE_IMPORT:
1354
          feedback_fn("* running the instance OS import scripts...")
1355

    
1356
          transfers = []
1357

    
1358
          for idx, image in enumerate(self.src_images):
1359
            if not image:
1360
              continue
1361

    
1362
            # FIXME: pass debug option from opcode to backend
1363
            dt = masterd.instance.DiskTransfer("disk/%s" % idx,
1364
                                               constants.IEIO_FILE, (image, ),
1365
                                               constants.IEIO_SCRIPT,
1366
                                               (iobj.disks[idx], idx),
1367
                                               None)
1368
            transfers.append(dt)
1369

    
1370
          import_result = \
1371
            masterd.instance.TransferInstanceData(self, feedback_fn,
1372
                                                  self.op.src_node_uuid,
1373
                                                  self.pnode.uuid,
1374
                                                  self.pnode.secondary_ip,
1375
                                                  iobj, transfers)
1376
          if not compat.all(import_result):
1377
            self.LogWarning("Some disks for instance %s on node %s were not"
1378
                            " imported successfully" % (self.op.instance_name,
1379
                                                        self.pnode.name))
1380

    
1381
          rename_from = self._old_instance_name
1382

    
1383
        elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
1384
          feedback_fn("* preparing remote import...")
1385
          # The source cluster will stop the instance before attempting to make
1386
          # a connection. In some cases stopping an instance can take a long
1387
          # time, hence the shutdown timeout is added to the connection
1388
          # timeout.
1389
          connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
1390
                             self.op.source_shutdown_timeout)
1391
          timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
1392

    
1393
          assert iobj.primary_node == self.pnode.uuid
1394
          disk_results = \
1395
            masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
1396
                                          self.source_x509_ca,
1397
                                          self._cds, timeouts)
1398
          if not compat.all(disk_results):
1399
            # TODO: Should the instance still be started, even if some disks
1400
            # failed to import (valid for local imports, too)?
1401
            self.LogWarning("Some disks for instance %s on node %s were not"
1402
                            " imported successfully" % (self.op.instance_name,
1403
                                                        self.pnode.name))
1404

    
1405
          rename_from = self.source_instance_name
1406

    
1407
        else:
1408
          # also checked in the prereq part
1409
          raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
1410
                                       % self.op.mode)
1411

    
1412
        # Run rename script on newly imported instance
1413
        assert iobj.name == self.op.instance_name
1414
        feedback_fn("Running rename script for %s" % self.op.instance_name)
1415
        result = self.rpc.call_instance_run_rename(self.pnode.uuid, iobj,
1416
                                                   rename_from,
1417
                                                   self.op.debug_level)
1418
        result.Warn("Failed to run rename script for %s on node %s" %
1419
                    (self.op.instance_name, self.pnode.name), self.LogWarning)
1420

    
1421
    assert not self.owned_locks(locking.LEVEL_NODE_RES)
1422

    
1423
    if self.op.start:
1424
      iobj.admin_state = constants.ADMINST_UP
1425
      self.cfg.Update(iobj, feedback_fn)
1426
      logging.info("Starting instance %s on node %s", self.op.instance_name,
1427
                   self.pnode.name)
1428
      feedback_fn("* starting instance...")
1429
      result = self.rpc.call_instance_start(self.pnode.uuid, (iobj, None, None),
1430
                                            False, self.op.reason)
1431
      result.Raise("Could not start instance")
1432

    
1433
    return list(iobj.all_nodes)
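
  # Illustrative note (not part of the original module): the Exec method
  # above is normally reached through an OpInstanceCreate opcode, e.g. from
  # a command line such as
  #
  #   gnt-instance add -t drbd -o debootstrap+default -s 10G \
  #     -n node1.example.com:node2.example.com instance1.example.com
  #
  # The OS name, disk size and node/instance names are only examples.
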


class LUInstanceRename(LogicalUnit):
  """Rename an instance.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE

  def CheckArguments(self):
1444
    """Check arguments.
1445

1446
    """
1447
    if self.op.ip_check and not self.op.name_check:
1448
      # TODO: make the ip check more flexible and not depend on the name check
1449
      raise errors.OpPrereqError("IP address check requires a name check",
1450
                                 errors.ECODE_INVAL)
1451

    
1452
  def BuildHooksEnv(self):
1453
    """Build hooks env.
1454

1455
    This runs on master, primary and secondary nodes of the instance.
1456

1457
    """
1458
    env = BuildInstanceHookEnvByObject(self, self.instance)
1459
    env["INSTANCE_NEW_NAME"] = self.op.new_name
1460
    return env
1461

    
1462
  def BuildHooksNodes(self):
1463
    """Build hooks nodes.
1464

1465
    """
1466
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
1467
    return (nl, nl)
1468

    
1469
  def CheckPrereq(self):
1470
    """Check prerequisites.
1471

1472
    This checks that the instance is in the cluster and is not running.
1473

1474
    """
1475
    (self.op.instance_uuid, self.op.instance_name) = \
1476
      ExpandInstanceUuidAndName(self.cfg, self.op.instance_uuid,
1477
                                self.op.instance_name)
1478
    instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1479
    assert instance is not None
1480
    CheckNodeOnline(self, instance.primary_node)
1481
    CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
1482
                       msg="cannot rename")
1483
    self.instance = instance
1484

    
1485
    new_name = self.op.new_name
1486
    if self.op.name_check:
1487
      hostname = _CheckHostnameSane(self, new_name)
1488
      new_name = self.op.new_name = hostname.name
1489
      if (self.op.ip_check and
1490
          netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
1491
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
1492
                                   (hostname.ip, new_name),
1493
                                   errors.ECODE_NOTUNIQUE)
1494

    
1495
    instance_names = [inst.name for
1496
                      inst in self.cfg.GetAllInstancesInfo().values()]
1497
    if new_name in instance_names and new_name != instance.name:
1498
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
1499
                                 new_name, errors.ECODE_EXISTS)
1500

    
1501
  def Exec(self, feedback_fn):
1502
    """Rename the instance.
1503

1504
    """
1505
    old_name = self.instance.name
1506

    
1507
    rename_file_storage = False
1508
    if (self.instance.disk_template in constants.DTS_FILEBASED and
1509
        self.op.new_name != self.instance.name):
1510
      old_file_storage_dir = os.path.dirname(
1511
                               self.instance.disks[0].logical_id[1])
1512
      rename_file_storage = True
1513

    
1514
    self.cfg.RenameInstance(self.instance.uuid, self.op.new_name)
1515
    # Change the instance lock. This is definitely safe while we hold the BGL.
1516
    # Otherwise the new lock would have to be added in acquired mode.
1517
    assert self.REQ_BGL
1518
    assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER)
1519
    self.glm.remove(locking.LEVEL_INSTANCE, old_name)
1520
    self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
1521

    
1522
    # re-read the instance from the configuration after rename
1523
    renamed_inst = self.cfg.GetInstanceInfo(self.instance.uuid)
1524

    
1525
    if rename_file_storage:
1526
      new_file_storage_dir = os.path.dirname(
1527
                               renamed_inst.disks[0].logical_id[1])
1528
      result = self.rpc.call_file_storage_dir_rename(renamed_inst.primary_node,
1529
                                                     old_file_storage_dir,
1530
                                                     new_file_storage_dir)
1531
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
1532
                   " (but the instance has been renamed in Ganeti)" %
1533
                   (self.cfg.GetNodeName(renamed_inst.primary_node),
1534
                    old_file_storage_dir, new_file_storage_dir))
1535

    
1536
    StartInstanceDisks(self, renamed_inst, None)
1537
    # update info on disks
1538
    info = GetInstanceInfoText(renamed_inst)
1539
    for (idx, disk) in enumerate(renamed_inst.disks):
1540
      for node_uuid in renamed_inst.all_nodes:
1541
        self.cfg.SetDiskID(disk, node_uuid)
1542
        result = self.rpc.call_blockdev_setinfo(node_uuid, disk, info)
1543
        result.Warn("Error setting info on node %s for disk %s" %
1544
                    (self.cfg.GetNodeName(node_uuid), idx), self.LogWarning)
1545
    try:
1546
      result = self.rpc.call_instance_run_rename(renamed_inst.primary_node,
1547
                                                 renamed_inst, old_name,
1548
                                                 self.op.debug_level)
1549
      result.Warn("Could not run OS rename script for instance %s on node %s"
1550
                  " (but the instance has been renamed in Ganeti)" %
1551
                  (renamed_inst.name,
1552
                   self.cfg.GetNodeName(renamed_inst.primary_node)),
1553
                  self.LogWarning)
1554
    finally:
1555
      ShutdownInstanceDisks(self, renamed_inst)
1556

    
1557
    return renamed_inst.name
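
  # Illustrative note (not part of the original module): this LU backs
  # "gnt-instance rename", e.g.
  #
  #   gnt-instance rename old-name.example.com new-name.example.com
  #
  # The hostnames are only examples; op.name_check and op.ip_check (handled
  # in CheckArguments/CheckPrereq above) control the DNS and ping checks.
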


class LUInstanceRemove(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
1569
    self._ExpandAndLockInstance()
1570
    self.needed_locks[locking.LEVEL_NODE] = []
1571
    self.needed_locks[locking.LEVEL_NODE_RES] = []
1572
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
1573

    
1574
  def DeclareLocks(self, level):
1575
    if level == locking.LEVEL_NODE:
1576
      self._LockInstancesNodes()
1577
    elif level == locking.LEVEL_NODE_RES:
1578
      # Copy node locks
1579
      self.needed_locks[locking.LEVEL_NODE_RES] = \
1580
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1581

    
1582
  def BuildHooksEnv(self):
1583
    """Build hooks env.
1584

1585
    This runs on master, primary and secondary nodes of the instance.
1586

1587
    """
1588
    env = BuildInstanceHookEnvByObject(self, self.instance)
1589
    env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
1590
    return env
1591

    
1592
  def BuildHooksNodes(self):
1593
    """Build hooks nodes.
1594

1595
    """
1596
    nl = [self.cfg.GetMasterNode()]
1597
    nl_post = list(self.instance.all_nodes) + nl
1598
    return (nl, nl_post)
1599

    
1600
  def CheckPrereq(self):
1601
    """Check prerequisites.
1602

1603
    This checks that the instance is in the cluster.
1604

1605
    """
1606
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1607
    assert self.instance is not None, \
1608
      "Cannot retrieve locked instance %s" % self.op.instance_name
1609

    
1610
  def Exec(self, feedback_fn):
1611
    """Remove the instance.
1612

1613
    """
1614
    logging.info("Shutting down instance %s on node %s", self.instance.name,
1615
                 self.cfg.GetNodeName(self.instance.primary_node))
1616

    
1617
    result = self.rpc.call_instance_shutdown(self.instance.primary_node,
1618
                                             self.instance,
1619
                                             self.op.shutdown_timeout,
1620
                                             self.op.reason)
1621
    if self.op.ignore_failures:
1622
      result.Warn("Warning: can't shutdown instance", feedback_fn)
1623
    else:
1624
      result.Raise("Could not shutdown instance %s on node %s" %
1625
                   (self.instance.name,
1626
                    self.cfg.GetNodeName(self.instance.primary_node)))
1627

    
1628
    assert (self.owned_locks(locking.LEVEL_NODE) ==
1629
            self.owned_locks(locking.LEVEL_NODE_RES))
1630
    assert not (set(self.instance.all_nodes) -
1631
                self.owned_locks(locking.LEVEL_NODE)), \
1632
      "Not owning correct locks"
1633

    
1634
    RemoveInstance(self, feedback_fn, self.instance, self.op.ignore_failures)
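
  # Illustrative note (not part of the original module): a minimal invocation
  # driving this LU would be
  #
  #   gnt-instance remove instance1.example.com
  #
  # The instance name is only an example; op.shutdown_timeout and
  # op.ignore_failures (used in Exec above) come from the opcode.
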


class LUInstanceMove(LogicalUnit):
  """Move an instance by data-copying.

  """
  HPATH = "instance-move"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
1646
    self._ExpandAndLockInstance()
1647
    (self.op.target_node_uuid, self.op.target_node) = \
1648
      ExpandNodeUuidAndName(self.cfg, self.op.target_node_uuid,
1649
                            self.op.target_node)
1650
    self.needed_locks[locking.LEVEL_NODE] = [self.op.target_node]
1651
    self.needed_locks[locking.LEVEL_NODE_RES] = []
1652
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
1653

    
1654
  def DeclareLocks(self, level):
1655
    if level == locking.LEVEL_NODE:
1656
      self._LockInstancesNodes(primary_only=True)
1657
    elif level == locking.LEVEL_NODE_RES:
1658
      # Copy node locks
1659
      self.needed_locks[locking.LEVEL_NODE_RES] = \
1660
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1661

    
1662
  def BuildHooksEnv(self):
1663
    """Build hooks env.
1664

1665
    This runs on master, primary and secondary nodes of the instance.
1666

1667
    """
1668
    env = {
1669
      "TARGET_NODE": self.op.target_node,
1670
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
1671
      }
1672
    env.update(BuildInstanceHookEnvByObject(self, self.instance))
1673
    return env
1674

    
1675
  def BuildHooksNodes(self):
1676
    """Build hooks nodes.
1677

1678
    """
1679
    nl = [
1680
      self.cfg.GetMasterNode(),
1681
      self.instance.primary_node,
1682
      self.op.target_node_uuid,
1683
      ]
1684
    return (nl, nl)
1685

    
1686
  def CheckPrereq(self):
1687
    """Check prerequisites.
1688

1689
    This checks that the instance is in the cluster.
1690

1691
    """
1692
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1693
    assert self.instance is not None, \
1694
      "Cannot retrieve locked instance %s" % self.op.instance_name
1695

    
1696
    if self.instance.disk_template not in constants.DTS_COPYABLE:
1697
      raise errors.OpPrereqError("Disk template %s not suitable for copying" %
1698
                                 self.instance.disk_template,
1699
                                 errors.ECODE_STATE)
1700

    
1701
    target_node = self.cfg.GetNodeInfo(self.op.target_node_uuid)
1702
    assert target_node is not None, \
1703
      "Cannot retrieve locked node %s" % self.op.target_node
1704

    
1705
    self.target_node_uuid = target_node.uuid
1706
    if target_node.uuid == self.instance.primary_node:
1707
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
1708
                                 (self.instance.name, target_node.name),
1709
                                 errors.ECODE_STATE)
1710

    
1711
    bep = self.cfg.GetClusterInfo().FillBE(self.instance)
1712

    
1713
    for idx, dsk in enumerate(self.instance.disks):
1714
      if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
1715
        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
1716
                                   " cannot copy" % idx, errors.ECODE_STATE)
1717

    
1718
    CheckNodeOnline(self, target_node.uuid)
1719
    CheckNodeNotDrained(self, target_node.uuid)
1720
    CheckNodeVmCapable(self, target_node.uuid)
1721
    cluster = self.cfg.GetClusterInfo()
1722
    group_info = self.cfg.GetNodeGroup(target_node.group)
1723
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
1724
    CheckTargetNodeIPolicy(self, ipolicy, self.instance, target_node, self.cfg,
1725
                           ignore=self.op.ignore_ipolicy)
1726

    
1727
    if self.instance.admin_state == constants.ADMINST_UP:
1728
      # check memory requirements on the target node
1729
      CheckNodeFreeMemory(
1730
          self, target_node.uuid, "failing over instance %s" %
1731
          self.instance.name, bep[constants.BE_MAXMEM],
1732
          self.instance.hypervisor,
1733
          self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])
1734
    else:
1735
      self.LogInfo("Not checking memory on the target node as"
                   " instance will not be started")
1737

    
1738
    # check bridge existence
1739
    CheckInstanceBridgesExist(self, self.instance, node_uuid=target_node.uuid)
1740

    
1741
  def Exec(self, feedback_fn):
1742
    """Move an instance.
1743

1744
    The move is done by shutting it down on its present node, copying
1745
    the data over (slow) and starting it on the new node.
1746

1747
    """
1748
    source_node = self.cfg.GetNodeInfo(self.instance.primary_node)
1749
    target_node = self.cfg.GetNodeInfo(self.target_node_uuid)
1750

    
1751
    self.LogInfo("Shutting down instance %s on source node %s",
1752
                 self.instance.name, source_node.name)
1753

    
1754
    assert (self.owned_locks(locking.LEVEL_NODE) ==
1755
            self.owned_locks(locking.LEVEL_NODE_RES))
1756

    
1757
    result = self.rpc.call_instance_shutdown(source_node.uuid, self.instance,
1758
                                             self.op.shutdown_timeout,
1759
                                             self.op.reason)
1760
    if self.op.ignore_consistency:
1761
      result.Warn("Could not shutdown instance %s on node %s. Proceeding"
1762
                  " anyway. Please make sure node %s is down. Error details" %
1763
                  (self.instance.name, source_node.name, source_node.name),
1764
                  self.LogWarning)
1765
    else:
1766
      result.Raise("Could not shutdown instance %s on node %s" %
1767
                   (self.instance.name, source_node.name))
1768

    
1769
    # create the target disks
1770
    try:
1771
      CreateDisks(self, self.instance, target_node_uuid=target_node.uuid)
1772
    except errors.OpExecError:
1773
      self.LogWarning("Device creation failed")
1774
      self.cfg.ReleaseDRBDMinors(self.instance.uuid)
1775
      raise
1776

    
1777
    cluster_name = self.cfg.GetClusterInfo().cluster_name
1778

    
1779
    errs = []
1780
    # activate, get path, copy the data over
1781
    for idx, disk in enumerate(self.instance.disks):
1782
      self.LogInfo("Copying data for disk %d", idx)
1783
      result = self.rpc.call_blockdev_assemble(
1784
                 target_node.uuid, (disk, self.instance), self.instance.name,
1785
                 True, idx)
1786
      if result.fail_msg:
1787
        self.LogWarning("Can't assemble newly created disk %d: %s",
1788
                        idx, result.fail_msg)
1789
        errs.append(result.fail_msg)
1790
        break
1791
      dev_path = result.payload
1792
      result = self.rpc.call_blockdev_export(source_node.uuid, (disk,
1793
                                                                self.instance),
1794
                                             target_node.name, dev_path,
1795
                                             cluster_name)
1796
      if result.fail_msg:
1797
        self.LogWarning("Can't copy data over for disk %d: %s",
1798
                        idx, result.fail_msg)
1799
        errs.append(result.fail_msg)
1800
        break
1801

    
1802
    if errs:
1803
      self.LogWarning("Some disks failed to copy, aborting")
1804
      try:
1805
        RemoveDisks(self, self.instance, target_node_uuid=target_node.uuid)
1806
      finally:
1807
        self.cfg.ReleaseDRBDMinors(self.instance.uuid)
1808
        raise errors.OpExecError("Errors during disk copy: %s" %
1809
                                 (",".join(errs),))
1810

    
1811
    self.instance.primary_node = target_node.uuid
1812
    self.cfg.Update(self.instance, feedback_fn)
1813

    
1814
    self.LogInfo("Removing the disks on the original node")
1815
    RemoveDisks(self, self.instance, target_node_uuid=source_node.uuid)
1816

    
1817
    # Only start the instance if it's marked as up
1818
    if self.instance.admin_state == constants.ADMINST_UP:
1819
      self.LogInfo("Starting instance %s on node %s",
1820
                   self.instance.name, target_node.name)
1821

    
1822
      disks_ok, _ = AssembleInstanceDisks(self, self.instance,
1823
                                          ignore_secondaries=True)
1824
      if not disks_ok:
1825
        ShutdownInstanceDisks(self, self.instance)
1826
        raise errors.OpExecError("Can't activate the instance's disks")
1827

    
1828
      result = self.rpc.call_instance_start(target_node.uuid,
1829
                                            (self.instance, None, None), False,
1830
                                            self.op.reason)
1831
      msg = result.fail_msg
1832
      if msg:
1833
        ShutdownInstanceDisks(self, self.instance)
1834
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
1835
                                 (self.instance.name, target_node.name, msg))
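
  # Illustrative note (not part of the original module): this LU implements
  # "gnt-instance move", which copies the disk data to the target node, e.g.
  #
  #   gnt-instance move -n node2.example.com instance1.example.com
  #
  # The names are only examples; per CheckPrereq above, only instances whose
  # disks are plain LVs or file-based can be moved this way.
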


class LUInstanceMultiAlloc(NoHooksLU):
  """Allocates multiple instances at the same time.

  """
  REQ_BGL = False

  def CheckArguments(self):
1845
    """Check arguments.
1846

1847
    """
1848
    nodes = []
1849
    for inst in self.op.instances:
1850
      if inst.iallocator is not None:
1851
        raise errors.OpPrereqError("iallocator is not allowed to be set on"
                                   " instance objects", errors.ECODE_INVAL)
1853
      nodes.append(bool(inst.pnode))
1854
      if inst.disk_template in constants.DTS_INT_MIRROR:
1855
        nodes.append(bool(inst.snode))
1856

    
1857
    has_nodes = compat.any(nodes)
1858
    if compat.all(nodes) ^ has_nodes:
1859
      raise errors.OpPrereqError("There are instance objects providing"
1860
                                 " pnode/snode while others do not",
1861
                                 errors.ECODE_INVAL)
1862

    
1863
    if self.op.iallocator is None:
1864
      default_iallocator = self.cfg.GetDefaultIAllocator()
1865
      if default_iallocator and has_nodes:
1866
        self.op.iallocator = default_iallocator
1867
      else:
1868
        raise errors.OpPrereqError("No iallocator or nodes on the instances"
1869
                                   " given and no cluster-wide default"
1870
                                   " iallocator found; please specify either"
1871
                                   " an iallocator or nodes on the instances"
1872
                                   " or set a cluster-wide default iallocator",
1873
                                   errors.ECODE_INVAL)
1874

    
1875
    _CheckOpportunisticLocking(self.op)
1876

    
1877
    dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
1878
    if dups:
1879
      raise errors.OpPrereqError("There are duplicate instance names: %s" %
1880
                                 utils.CommaJoin(dups), errors.ECODE_INVAL)
1881

    
1882
  def ExpandNames(self):
1883
    """Calculate the locks.
1884

1885
    """
1886
    self.share_locks = ShareAll()
1887
    self.needed_locks = {
1888
      # iallocator will select nodes and even if no iallocator is used,
1889
      # collisions with LUInstanceCreate should be avoided
1890
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
1891
      }
1892

    
1893
    if self.op.iallocator:
1894
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
1895
      self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET
1896

    
1897
      if self.op.opportunistic_locking:
1898
        self.opportunistic_locks[locking.LEVEL_NODE] = True
1899
        self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
1900
    else:
1901
      nodeslist = []
1902
      for inst in self.op.instances:
1903
        (inst.pnode_uuid, inst.pnode) = \
1904
          ExpandNodeUuidAndName(self.cfg, inst.pnode_uuid, inst.pnode)
1905
        nodeslist.append(inst.pnode)
1906
        if inst.snode is not None:
1907
          (inst.snode_uuid, inst.snode) = \
1908
            ExpandNodeUuidAndName(self.cfg, inst.snode_uuid, inst.snode)
1909
          nodeslist.append(inst.snode)
1910

    
1911
      self.needed_locks[locking.LEVEL_NODE] = nodeslist
1912
      # Lock resources of instance's primary and secondary nodes (copy to
1913
      # prevent accidental modification)
1914
      self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)
1915

    
1916
  def CheckPrereq(self):
1917
    """Check prerequisite.
1918

1919
    """
1920
    cluster = self.cfg.GetClusterInfo()
1921
    default_vg = self.cfg.GetVGName()
1922
    ec_id = self.proc.GetECId()
1923

    
1924
    if self.op.opportunistic_locking:
1925
      # Only consider nodes for which a lock is held
1926
      node_whitelist = self.cfg.GetNodeNames(
1927
                         list(self.owned_locks(locking.LEVEL_NODE)))
1928
    else:
1929
      node_whitelist = None
1930

    
1931
    insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
1932
                                         _ComputeNics(op, cluster, None,
1933
                                                      self.cfg, ec_id),
1934
                                         _ComputeFullBeParams(op, cluster),
1935
                                         node_whitelist)
1936
             for op in self.op.instances]
1937

    
1938
    req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
1939
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)
1940

    
1941
    ial.Run(self.op.iallocator)
1942

    
1943
    if not ial.success:
1944
      raise errors.OpPrereqError("Can't compute nodes using"
1945
                                 " iallocator '%s': %s" %
1946
                                 (self.op.iallocator, ial.info),
1947
                                 errors.ECODE_NORES)
1948

    
1949
    self.ia_result = ial.result
1950

    
1951
    if self.op.dry_run:
1952
      self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
1953
        constants.JOB_IDS_KEY: [],
1954
        })
1955

    
1956
  def _ConstructPartialResult(self):
    """Constructs the partial result.
1958

1959
    """
1960
    (allocatable, failed) = self.ia_result
1961
    return {
1962
      opcodes.OpInstanceMultiAlloc.ALLOCATABLE_KEY:
1963
        map(compat.fst, allocatable),
1964
      opcodes.OpInstanceMultiAlloc.FAILED_KEY: failed,
1965
      }
1966

    
1967
  def Exec(self, feedback_fn):
1968
    """Executes the opcode.
1969

1970
    """
1971
    op2inst = dict((op.instance_name, op) for op in self.op.instances)
1972
    (allocatable, failed) = self.ia_result
1973

    
1974
    jobs = []
1975
    for (name, node_names) in allocatable:
1976
      op = op2inst.pop(name)
1977

    
1978
      (op.pnode_uuid, op.pnode) = \
1979
        ExpandNodeUuidAndName(self.cfg, None, node_names[0])
1980
      if len(node_names) > 1:
1981
        (op.snode_uuid, op.snode) = \
1982
          ExpandNodeUuidAndName(self.cfg, None, node_names[1])
1983

    
1984
      jobs.append([op])
1985

    
1986
    missing = set(op2inst.keys()) - set(failed)
1987
    assert not missing, \
1988
      "Iallocator did return incomplete result: %s" % utils.CommaJoin(missing)
1989

    
1990
    return ResultWithJobs(jobs, **self._ConstructPartialResult())
1991

    
1992

    
1993
class _InstNicModPrivate:
  """Data structure for network interface modifications.

  Used by L{LUInstanceSetParams}.

  """
  def __init__(self):
    self.params = None
    self.filled = None


def _PrepareContainerMods(mods, private_fn):
2005
  """Prepares a list of container modifications by adding a private data field.
2006

2007
  @type mods: list of tuples; (operation, index, parameters)
2008
  @param mods: List of modifications
2009
  @type private_fn: callable or None
2010
  @param private_fn: Callable for constructing a private data field for a
2011
    modification
2012
  @rtype: list
2013

2014
  """
2015
  if private_fn is None:
2016
    fn = lambda: None
2017
  else:
2018
    fn = private_fn
2019

    
2020
  return [(op, idx, params, fn()) for (op, idx, params) in mods]
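

# Illustrative sketch (not part of the original module): _PrepareContainerMods
# only tacks a per-modification private object onto each tuple, e.g.
#
#   mods = [(constants.DDM_ADD, -1, {"size": 1024})]
#   _PrepareContainerMods(mods, _InstNicModPrivate)
#   # -> [(DDM_ADD, -1, {"size": 1024}, <_InstNicModPrivate instance>)]
#
# The parameter dict is only an example.
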


def _CheckNodesPhysicalCPUs(lu, node_uuids, requested, hypervisor_specs):
2024
  """Checks if nodes have enough physical CPUs
2025

2026
  This function checks if all given nodes have the needed number of
2027
  physical CPUs. In case any node has less CPUs or we cannot get the
2028
  information from the node, this function raises an OpPrereqError
2029
  exception.
2030

2031
  @type lu: C{LogicalUnit}
2032
  @param lu: a logical unit from which we get configuration data
2033
  @type node_uuids: C{list}
2034
  @param node_uuids: the list of node UUIDs to check
2035
  @type requested: C{int}
2036
  @param requested: the minimum acceptable number of physical CPUs
2037
  @type hypervisor_specs: list of pairs (string, dict of strings)
2038
  @param hypervisor_specs: list of hypervisor specifications in
2039
      pairs (hypervisor_name, hvparams)
2040
  @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
2041
      or we cannot check the node
2042

2043
  """
2044
  nodeinfo = lu.rpc.call_node_info(node_uuids, None, hypervisor_specs)
2045
  for node_uuid in node_uuids:
2046
    info = nodeinfo[node_uuid]
2047
    node_name = lu.cfg.GetNodeName(node_uuid)
2048
    info.Raise("Cannot get current information from node %s" % node_name,
2049
               prereq=True, ecode=errors.ECODE_ENVIRON)
2050
    (_, _, (hv_info, )) = info.payload
2051
    num_cpus = hv_info.get("cpu_total", None)
2052
    if not isinstance(num_cpus, int):
2053
      raise errors.OpPrereqError("Can't compute the number of physical CPUs"
2054
                                 " on node %s, result was '%s'" %
2055
                                 (node_name, num_cpus), errors.ECODE_ENVIRON)
2056
    if requested > num_cpus:
2057
      raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
2058
                                 "required" % (node_name, num_cpus, requested),
2059
                                 errors.ECODE_NORES)
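

# Illustrative sketch (not part of the original module): requiring at least
# four physical CPUs on an instance's primary node could look like
#
#   _CheckNodesPhysicalCPUs(self, [instance.primary_node], 4,
#                           [(instance.hypervisor, hvparams)])
#
# where "hvparams" stands for the filled hypervisor parameters; all names are
# examples only.
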


def GetItemFromContainer(identifier, kind, container):
  """Return the item referred to by the identifier.
2064

2065
  @type identifier: string
2066
  @param identifier: Item index or name or UUID
2067
  @type kind: string
2068
  @param kind: One-word item description
2069
  @type container: list
2070
  @param container: Container to get the item from
2071

2072
  """
2073
  # Index
2074
  try:
2075
    idx = int(identifier)
2076
    if idx == -1:
2077
      # Append
2078
      absidx = len(container) - 1
2079
    elif idx < 0:
2080
      raise IndexError("Not accepting negative indices other than -1")
2081
    elif idx > len(container):
2082
      raise IndexError("Got %s index %s, but there are only %s" %
2083
                       (kind, idx, len(container)))
2084
    else:
2085
      absidx = idx
2086
    return (absidx, container[idx])
2087
  except ValueError:
2088
    pass
2089

    
2090
  for idx, item in enumerate(container):
2091
    if item.uuid == identifier or item.name == identifier:
2092
      return (idx, item)
2093

    
2094
  raise errors.OpPrereqError("Cannot find %s with identifier %s" %
2095
                             (kind, identifier), errors.ECODE_NOENT)
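

# Illustrative sketch (not part of the original module): the identifier may be
# a numeric index or an item name/UUID, e.g.
#
#   _, nic = GetItemFromContainer("0", "nic", instance.nics)   # first NIC
#   _, nic = GetItemFromContainer("-1", "nic", instance.nics)  # last NIC
#   _, nic = GetItemFromContainer("web0", "nic", instance.nics)  # by name/UUID
#
# "instance.nics" and the NIC name "web0" are only examples.
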


def _ApplyContainerMods(kind, container, chgdesc, mods,
2099
                        create_fn, modify_fn, remove_fn):
2100
  """Applies descriptions in C{mods} to C{container}.
2101

2102
  @type kind: string
2103
  @param kind: One-word item description
2104
  @type container: list
2105
  @param container: Container to modify
2106
  @type chgdesc: None or list
2107
  @param chgdesc: List of applied changes
2108
  @type mods: list
2109
  @param mods: Modifications as returned by L{_PrepareContainerMods}
2110
  @type create_fn: callable
2111
  @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
2112
    receives absolute item index, parameters and private data object as added
2113
    by L{_PrepareContainerMods}, returns tuple containing new item and changes
2114
    as list
2115
  @type modify_fn: callable
2116
  @param modify_fn: Callback for modifying an existing item
2117
    (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
2118
    and private data object as added by L{_PrepareContainerMods}, returns
2119
    changes as list
2120
  @type remove_fn: callable
2121
  @param remove_fn: Callback on removing item; receives absolute item index,
2122
    item and private data object as added by L{_PrepareContainerMods}
2123

2124
  """
2125
  for (op, identifier, params, private) in mods:
2126
    changes = None
2127

    
2128
    if op == constants.DDM_ADD:
2129
      # Calculate where item will be added
2130
      # When adding an item, identifier can only be an index
2131
      try:
2132
        idx = int(identifier)
2133
      except ValueError:
2134
        raise errors.OpPrereqError("Only positive integer or -1 is accepted as"
2135
                                   " identifier for %s" % constants.DDM_ADD,
2136
                                   errors.ECODE_INVAL)
2137
      if idx == -1:
2138
        addidx = len(container)
2139
      else:
2140
        if idx < 0:
2141
          raise IndexError("Not accepting negative indices other than -1")
2142
        elif idx > len(container):
2143
          raise IndexError("Got %s index %s, but there are only %s" %
2144
                           (kind, idx, len(container)))
2145
        addidx = idx
2146

    
2147
      if create_fn is None:
2148
        item = params
2149
      else:
2150
        (item, changes) = create_fn(addidx, params, private)
2151

    
2152
      if idx == -1:
2153
        container.append(item)
2154
      else:
2155
        assert idx >= 0
2156
        assert idx <= len(container)
2157
        # list.insert does so before the specified index
2158
        container.insert(idx, item)
2159
    else:
2160
      # Retrieve existing item
2161
      (absidx, item) = GetItemFromContainer(identifier, kind, container)
2162

    
2163
      if op == constants.DDM_REMOVE:
2164
        assert not params
2165

    
2166
        if remove_fn is not None:
2167
          remove_fn(absidx, item, private)
2168

    
2169
        changes = [("%s/%s" % (kind, absidx), "remove")]
2170

    
2171
        assert container[absidx] == item
2172
        del container[absidx]
2173
      elif op == constants.DDM_MODIFY:
2174
        if modify_fn is not None:
2175
          changes = modify_fn(absidx, item, params, private)
2176
      else:
2177
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2178

    
2179
    assert _TApplyContModsCbChanges(changes)
2180

    
2181
    if not (chgdesc is None or changes is None):
2182
      chgdesc.extend(changes)
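

# Illustrative sketch (not part of the original module): a typical call, along
# the lines of what LUInstanceSetParams does for NICs, would be
#
#   changes = []
#   _ApplyContainerMods("nic", instance.nics, changes, prepared_mods,
#                       create_fn, modify_fn, remove_fn)
#
# where "prepared_mods" comes from _PrepareContainerMods and the callbacks are
# assumed names; with DDM_ADD and identifier -1 the new item is appended,
# otherwise it is inserted at the given index.
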


def _UpdateIvNames(base_index, disks):
2186
  """Updates the C{iv_name} attribute of disks.
2187

2188
  @type disks: list of L{objects.Disk}
2189

2190
  """
2191
  for (idx, disk) in enumerate(disks):
2192
    disk.iv_name = "disk/%s" % (base_index + idx, )
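

# Illustrative sketch (not part of the original module): for an instance that
# already has one disk, appending two new disk objects and calling
#
#   _UpdateIvNames(1, new_disks)
#
# names them "disk/1" and "disk/2"; the starting index is only an example.
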


class LUInstanceSetParams(LogicalUnit):
  """Modifies an instance's parameters.

  """
  HPATH = "instance-modify"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  @staticmethod
2204
  def _UpgradeDiskNicMods(kind, mods, verify_fn):
2205
    assert ht.TList(mods)
2206
    assert not mods or len(mods[0]) in (2, 3)
2207

    
2208
    if mods and len(mods[0]) == 2:
2209
      result = []
2210

    
2211
      addremove = 0
2212
      for op, params in mods:
2213
        if op in (constants.DDM_ADD, constants.DDM_REMOVE):
2214
          result.append((op, -1, params))
2215
          addremove += 1
2216

    
2217
          if addremove > 1:
2218
            raise errors.OpPrereqError("Only one %s add or remove operation is"
2219
                                       " supported at a time" % kind,
2220
                                       errors.ECODE_INVAL)
2221
        else:
2222
          result.append((constants.DDM_MODIFY, op, params))
2223

    
2224
      assert verify_fn(result)
2225
    else:
2226
      result = mods
2227

    
2228
    return result
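
  # Illustrative sketch (not part of the original module): legacy two-element
  # modifications are upgraded to the three-element form used internally, e.g.
  #
  #   [(DDM_ADD, {"size": 512})]  -> [(DDM_ADD, -1, {"size": 512})]
  #   [(0, {"mode": "rw"})]       -> [(DDM_MODIFY, 0, {"mode": "rw"})]
  #
  # The parameter dicts are only examples.
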
  @staticmethod
2231
  def _CheckMods(kind, mods, key_types, item_fn):
2232
    """Ensures requested disk/NIC modifications are valid.
2233

2234
    """
2235
    for (op, _, params) in mods:
2236
      assert ht.TDict(params)
2237

    
2238
      # If 'key_types' is an empty dict, we assume we have an
2239
      # 'ext' template and thus do not ForceDictType
2240
      if key_types:
2241
        utils.ForceDictType(params, key_types)
2242

    
2243
      if op == constants.DDM_REMOVE:
2244
        if params:
2245
          raise errors.OpPrereqError("No settings should be passed when"
2246
                                     " removing a %s" % kind,
2247
                                     errors.ECODE_INVAL)
2248
      elif op in (constants.DDM_ADD, constants.DDM_MODIFY):
2249
        item_fn(op, params)
2250
      else:
2251
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2252

    
2253
  @staticmethod
2254
  def _VerifyDiskModification(op, params, excl_stor):
2255
    """Verifies a disk modification.
2256

2257
    """
2258
    if op == constants.DDM_ADD:
2259
      mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
2260
      if mode not in constants.DISK_ACCESS_SET:
2261
        raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
2262
                                   errors.ECODE_INVAL)
2263

    
2264
      size = params.get(constants.IDISK_SIZE, None)
2265
      if size is None:
2266
        raise errors.OpPrereqError("Required disk parameter '%s' missing" %
2267
                                   constants.IDISK_SIZE, errors.ECODE_INVAL)
2268

    
2269
      try:
2270
        size = int(size)
2271
      except (TypeError, ValueError), err:
2272
        raise errors.OpPrereqError("Invalid disk size parameter: %s" % err,
2273
                                   errors.ECODE_INVAL)
2274

    
2275
      params[constants.IDISK_SIZE] = size
2276
      name = params.get(constants.IDISK_NAME, None)
2277
      if name is not None and name.lower() == constants.VALUE_NONE:
2278
        params[constants.IDISK_NAME] = None
2279

    
2280
      CheckSpindlesExclusiveStorage(params, excl_stor, True)
2281

    
2282
    elif op == constants.DDM_MODIFY:
2283
      if constants.IDISK_SIZE in params:
2284
        raise errors.OpPrereqError("Disk size change not possible, use"
2285
                                   " grow-disk", errors.ECODE_INVAL)
2286
      if len(params) > 2:
2287
        raise errors.OpPrereqError("Disk modification doesn't support"
2288
                                   " additional arbitrary parameters",
2289
                                   errors.ECODE_INVAL)
2290
      name = params.get(constants.IDISK_NAME, None)
2291
      if name is not None and name.lower() == constants.VALUE_NONE:
2292
        params[constants.IDISK_NAME] = None
2293

    
2294
  @staticmethod
2295
  def _VerifyNicModification(op, params):
2296
    """Verifies a network interface modification.
2297

2298
    """
2299
    if op in (constants.DDM_ADD, constants.DDM_MODIFY):
2300
      ip = params.get(constants.INIC_IP, None)
2301
      name = params.get(constants.INIC_NAME, None)
2302
      req_net = params.get(constants.INIC_NETWORK, None)
2303
      link = params.get(constants.NIC_LINK, None)
2304
      mode = params.get(constants.NIC_MODE, None)
2305
      if name is not None and name.lower() == constants.VALUE_NONE:
2306
        params[constants.INIC_NAME] = None
2307
      if req_net is not None:
2308
        if req_net.lower() == constants.VALUE_NONE:
2309
          params[constants.INIC_NETWORK] = None
2310
          req_net = None
2311
        elif link is not None or mode is not None:
2312
          raise errors.OpPrereqError("If a network is given, mode or link"
                                     " should not be set",
2314
                                     errors.ECODE_INVAL)
2315

    
2316
      if op == constants.DDM_ADD:
2317
        macaddr = params.get(constants.INIC_MAC, None)
2318
        if macaddr is None:
2319
          params[constants.INIC_MAC] = constants.VALUE_AUTO
2320

    
2321
      if ip is not None:
2322
        if ip.lower() == constants.VALUE_NONE:
2323
          params[constants.INIC_IP] = None
2324
        else:
2325
          if ip.lower() == constants.NIC_IP_POOL:
2326
            if op == constants.DDM_ADD and req_net is None:
2327
              raise errors.OpPrereqError("If ip=pool, parameter network"
2328
                                         " cannot be none",
2329
                                         errors.ECODE_INVAL)
2330
          else:
2331
            if not netutils.IPAddress.IsValid(ip):
2332
              raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
2333
                                         errors.ECODE_INVAL)
2334

    
2335
      if constants.INIC_MAC in params:
2336
        macaddr = params[constants.INIC_MAC]
2337
        if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2338
          macaddr = utils.NormalizeAndValidateMac(macaddr)
2339

    
2340
        if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
2341
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
2342
                                     " modifying an existing NIC",
2343
                                     errors.ECODE_INVAL)
2344

    
2345
  def CheckArguments(self):
2346
    if not (self.op.nics or self.op.disks or self.op.disk_template or
2347
            self.op.hvparams or self.op.beparams or self.op.os_name or
2348
            self.op.offline is not None or self.op.runtime_mem or
2349
            self.op.pnode):
2350
      raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
2351

    
2352
    if self.op.hvparams:
2353
      CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
2354
                           "hypervisor", "instance", "cluster")
2355

    
2356
    self.op.disks = self._UpgradeDiskNicMods(
2357
      "disk", self.op.disks, opcodes.OpInstanceSetParams.TestDiskModifications)
2358
    self.op.nics = self._UpgradeDiskNicMods(
2359
      "NIC", self.op.nics, opcodes.OpInstanceSetParams.TestNicModifications)
2360

    
2361
    if self.op.disks and self.op.disk_template is not None:
2362
      raise errors.OpPrereqError("Disk template conversion and other disk"
2363
                                 " changes not supported at the same time",
2364
                                 errors.ECODE_INVAL)
2365

    
2366
    if (self.op.disk_template and
2367
        self.op.disk_template in constants.DTS_INT_MIRROR and
2368
        self.op.remote_node is None):
2369
      raise errors.OpPrereqError("Changing the disk template to a mirrored"
2370
                                 " one requires specifying a secondary node",
2371
                                 errors.ECODE_INVAL)
2372

    
2373
    # Check NIC modifications
2374
    self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
2375
                    self._VerifyNicModification)
2376

    
2377
    if self.op.pnode:
2378
      (self.op.pnode_uuid, self.op.pnode) = \
2379
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
2380

    
2381
  def ExpandNames(self):
2382
    self._ExpandAndLockInstance()
2383
    self.needed_locks[locking.LEVEL_NODEGROUP] = []
2384
    # Can't even acquire node locks in shared mode as upcoming changes in
2385
    # Ganeti 2.6 will start to modify the node object on disk conversion
2386
    self.needed_locks[locking.LEVEL_NODE] = []
2387
    self.needed_locks[locking.LEVEL_NODE_RES] = []
2388
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2389
    # Lock the node group to look up the ipolicy
2390
    self.share_locks[locking.LEVEL_NODEGROUP] = 1
2391

    
2392
  def DeclareLocks(self, level):
2393
    if level == locking.LEVEL_NODEGROUP:
2394
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
2395
      # Acquire locks for the instance's nodegroups optimistically. Needs
2396
      # to be verified in CheckPrereq
2397
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
2398
        self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
2399
    elif level == locking.LEVEL_NODE:
2400
      self._LockInstancesNodes()
2401
      if self.op.disk_template and self.op.remote_node:
2402
        (self.op.remote_node_uuid, self.op.remote_node) = \
2403
          ExpandNodeUuidAndName(self.cfg, self.op.remote_node_uuid,
2404
                                self.op.remote_node)
2405
        self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node_uuid)
2406
    elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
2407
      # Copy node locks
2408
      self.needed_locks[locking.LEVEL_NODE_RES] = \
2409
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
2410

    
2411
  def BuildHooksEnv(self):
2412
    """Build hooks env.
2413

2414
    This runs on the master, primary and secondaries.
2415

2416
    """
2417
    args = {}
2418
    if constants.BE_MINMEM in self.be_new:
2419
      args["minmem"] = self.be_new[constants.BE_MINMEM]
2420
    if constants.BE_MAXMEM in self.be_new:
2421
      args["maxmem"] = self.be_new[constants.BE_MAXMEM]
2422
    if constants.BE_VCPUS in self.be_new:
2423
      args["vcpus"] = self.be_new[constants.BE_VCPUS]
2424
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
2425
    # information at all.
2426

    
2427
    if self._new_nics is not None:
2428
      nics = []
2429

    
2430
      for nic in self._new_nics:
2431
        n = copy.deepcopy(nic)
2432
        nicparams = self.cluster.SimpleFillNIC(n.nicparams)
2433
        n.nicparams = nicparams
2434
        nics.append(NICToTuple(self, n))
2435

    
2436
      args["nics"] = nics
2437

    
2438
    env = BuildInstanceHookEnvByObject(self, self.instance, override=args)
2439
    if self.op.disk_template:
2440
      env["NEW_DISK_TEMPLATE"] = self.op.disk_template
2441
    if self.op.runtime_mem:
2442
      env["RUNTIME_MEMORY"] = self.op.runtime_mem
2443

    
2444
    return env
2445

    
2446
  def BuildHooksNodes(self):
2447
    """Build hooks nodes.
2448

2449
    """
2450
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
2451
    return (nl, nl)
2452

    
2453
  def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
2454
                              old_params, cluster, pnode_uuid):
2455

    
2456
    update_params_dict = dict([(key, params[key])
2457
                               for key in constants.NICS_PARAMETERS
2458
                               if key in params])
2459

    
2460
    req_link = update_params_dict.get(constants.NIC_LINK, None)
2461
    req_mode = update_params_dict.get(constants.NIC_MODE, None)
2462

    
2463
    new_net_uuid = None
2464
    new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid)
2465
    if new_net_uuid_or_name:
2466
      new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name)
2467
      new_net_obj = self.cfg.GetNetwork(new_net_uuid)
2468

    
2469
    if old_net_uuid:
2470
      old_net_obj = self.cfg.GetNetwork(old_net_uuid)
2471

    
2472
    if new_net_uuid:
2473
      netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode_uuid)
2474
      if not netparams:
2475
        raise errors.OpPrereqError("No netparams found for the network"
2476
                                   " %s, probably not connected" %
2477
                                   new_net_obj.name, errors.ECODE_INVAL)
2478
      new_params = dict(netparams)
2479
    else:
2480
      new_params = GetUpdatedParams(old_params, update_params_dict)
2481

    
2482
    utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)
2483

    
2484
    new_filled_params = cluster.SimpleFillNIC(new_params)
2485
    objects.NIC.CheckParameterSyntax(new_filled_params)
2486

    
2487
    new_mode = new_filled_params[constants.NIC_MODE]
2488
    if new_mode == constants.NIC_MODE_BRIDGED:
2489
      bridge = new_filled_params[constants.NIC_LINK]
2490
      msg = self.rpc.call_bridges_exist(pnode_uuid, [bridge]).fail_msg
2491
      if msg:
2492
        msg = "Error checking bridges on node '%s': %s" % \
2493
                (self.cfg.GetNodeName(pnode_uuid), msg)
2494
        if self.op.force:
2495
          self.warn.append(msg)
2496
        else:
2497
          raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
2498

    
2499
    elif new_mode == constants.NIC_MODE_ROUTED:
2500
      ip = params.get(constants.INIC_IP, old_ip)
2501
      if ip is None:
2502
        raise errors.OpPrereqError("Cannot set the NIC IP address to None"
2503
                                   " on a routed NIC", errors.ECODE_INVAL)
2504

    
2505
    elif new_mode == constants.NIC_MODE_OVS:
2506
      # TODO: check OVS link
2507
      self.LogInfo("OVS links are currently not checked for correctness")
2508

    
2509
    if constants.INIC_MAC in params:
2510
      mac = params[constants.INIC_MAC]
2511
      if mac is None:
2512
        raise errors.OpPrereqError("Cannot unset the NIC MAC address",
2513
                                   errors.ECODE_INVAL)
2514
      elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2515
        # otherwise generate the MAC address
2516
        params[constants.INIC_MAC] = \
2517
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
2518
      else:
2519
        # or validate/reserve the current one
2520
        try:
2521
          self.cfg.ReserveMAC(mac, self.proc.GetECId())
2522
        except errors.ReservationError:
2523
          raise errors.OpPrereqError("MAC address '%s' already in use"
2524
                                     " in cluster" % mac,
2525
                                     errors.ECODE_NOTUNIQUE)
2526
    elif new_net_uuid != old_net_uuid:

      def get_net_prefix(net_uuid):
        mac_prefix = None
        if net_uuid:
          nobj = self.cfg.GetNetwork(net_uuid)
          mac_prefix = nobj.mac_prefix

        return mac_prefix

      new_prefix = get_net_prefix(new_net_uuid)
      old_prefix = get_net_prefix(old_net_uuid)
      if old_prefix != new_prefix:
        params[constants.INIC_MAC] = \
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())

    # if there is a change in (ip, network) tuple
    new_ip = params.get(constants.INIC_IP, old_ip)
    if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
      if new_ip:
        # if IP is pool then require a network and generate one IP
        if new_ip.lower() == constants.NIC_IP_POOL:
          if new_net_uuid:
            try:
              new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("Unable to get a free IP"
                                         " from the address pool",
                                         errors.ECODE_STATE)
            self.LogInfo("Chose IP %s from network %s",
                         new_ip,
                         new_net_obj.name)
            params[constants.INIC_IP] = new_ip
          else:
            raise errors.OpPrereqError("ip=pool, but no network found",
                                       errors.ECODE_INVAL)
        # Reserve the new IP in the new network, if any
        elif new_net_uuid:
          try:
            self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId())
            self.LogInfo("Reserving IP %s in network %s",
                         new_ip, new_net_obj.name)
          except errors.ReservationError:
            raise errors.OpPrereqError("IP %s not available in network %s" %
                                       (new_ip, new_net_obj.name),
                                       errors.ECODE_NOTUNIQUE)
        # new network is None, so check whether the new IP is a conflicting IP
        elif self.op.conflicts_check:
          _CheckForConflictingIp(self, new_ip, pnode_uuid)

      # release the old IP if the old network is not None
      if old_ip and old_net_uuid:
        try:
          self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId())
        except errors.AddressPoolError:
          logging.warning("Release IP %s not contained in network %s",
                          old_ip, old_net_obj.name)

    # there are no changes in (ip, network) tuple and old network is not None
    elif (old_net_uuid is not None and
          (req_link is not None or req_mode is not None)):
      raise errors.OpPrereqError("Not allowed to change link or mode of"
                                 " a NIC that is connected to a network",
                                 errors.ECODE_INVAL)

    private.params = new_params
    private.filled = new_filled_params

  def _PreCheckDiskTemplate(self, pnode_info):
    """CheckPrereq checks related to a new disk template."""
    # Arguments are passed to avoid configuration lookups
    pnode_uuid = self.instance.primary_node
    if self.instance.disk_template == self.op.disk_template:
      raise errors.OpPrereqError("Instance already has disk template %s" %
                                 self.instance.disk_template,
                                 errors.ECODE_INVAL)

    if (self.instance.disk_template,
        self.op.disk_template) not in self._DISK_CONVERSIONS:
      raise errors.OpPrereqError("Unsupported disk template conversion from"
                                 " %s to %s" % (self.instance.disk_template,
                                                self.op.disk_template),
                                 errors.ECODE_INVAL)
    CheckInstanceState(self, self.instance, INSTANCE_DOWN,
                       msg="cannot change disk template")
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.remote_node_uuid == pnode_uuid:
        raise errors.OpPrereqError("Given new secondary node %s is the same"
                                   " as the primary node of the instance" %
                                   self.op.remote_node, errors.ECODE_STATE)
      CheckNodeOnline(self, self.op.remote_node_uuid)
      CheckNodeNotDrained(self, self.op.remote_node_uuid)
      # FIXME: here we assume that the old instance type is DT_PLAIN
      assert self.instance.disk_template == constants.DT_PLAIN
      disks = [{constants.IDISK_SIZE: d.size,
                constants.IDISK_VG: d.logical_id[0]}
               for d in self.instance.disks]
      required = ComputeDiskSizePerVG(self.op.disk_template, disks)
      CheckNodesFreeDiskPerVG(self, [self.op.remote_node_uuid], required)

      snode_info = self.cfg.GetNodeInfo(self.op.remote_node_uuid)
      snode_group = self.cfg.GetNodeGroup(snode_info.group)
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
                                                              snode_group)
      CheckTargetNodeIPolicy(self, ipolicy, self.instance, snode_info, self.cfg,
                             ignore=self.op.ignore_ipolicy)
      if pnode_info.group != snode_info.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    if self.op.disk_template not in constants.DTS_EXCL_STORAGE:
      # Make sure none of the nodes require exclusive storage
      nodes = [pnode_info]
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        assert snode_info
        nodes.append(snode_info)
      has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
      if compat.any(map(has_es, nodes)):
        errmsg = ("Cannot convert disk template from %s to %s when exclusive"
                  " storage is enabled" % (self.instance.disk_template,
                                           self.op.disk_template))
        raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)

  def _PreCheckDisks(self, ispec):
    """CheckPrereq checks related to disk changes.

    @type ispec: dict
    @param ispec: instance specs to be updated with the new disks

    """
    self.diskparams = self.cfg.GetInstanceDiskParams(self.instance)

    excl_stor = compat.any(
      rpc.GetExclusiveStorageForNodes(self.cfg,
                                      self.instance.all_nodes).values()
      )

    # Check disk modifications. This is done here and not in CheckArguments
    # (as with NICs), because we need to know the instance's disk template
    ver_fn = lambda op, par: self._VerifyDiskModification(op, par, excl_stor)
    if self.instance.disk_template == constants.DT_EXT:
      self._CheckMods("disk", self.op.disks, {}, ver_fn)
    else:
      self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
                      ver_fn)

    self.diskmod = _PrepareContainerMods(self.op.disks, None)
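    # Each entry in self.diskmod is a (op, identifier, params, private) tuple
    # as produced by _PrepareContainerMods; mod[0] is the DDM_* operation and
    # mod[2] the parameter dictionary used in the checks below.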

    # Check the validity of the `provider' parameter
    if self.instance.disk_template in constants.DT_EXT:
      for mod in self.diskmod:
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
        if mod[0] == constants.DDM_ADD:
          if ext_provider is None:
            raise errors.OpPrereqError("Instance template is '%s' and parameter"
                                       " '%s' missing, during disk add" %
                                       (constants.DT_EXT,
                                        constants.IDISK_PROVIDER),
                                       errors.ECODE_NOENT)
        elif mod[0] == constants.DDM_MODIFY:
          if ext_provider:
            raise errors.OpPrereqError("Parameter '%s' is invalid during disk"
                                       " modification" %
                                       constants.IDISK_PROVIDER,
                                       errors.ECODE_INVAL)
    else:
      for mod in self.diskmod:
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
        if ext_provider is not None:
          raise errors.OpPrereqError("Parameter '%s' is only valid for"
                                     " instances of type '%s'" %
                                     (constants.IDISK_PROVIDER,
                                      constants.DT_EXT),
                                     errors.ECODE_INVAL)

    if self.op.disks and self.instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Disk operations not supported for"
                                 " diskless instances", errors.ECODE_INVAL)

    def _PrepareDiskMod(_, disk, params, __):
      disk.name = params.get(constants.IDISK_NAME, None)

    # Verify disk changes (operating on a copy)
    disks = copy.deepcopy(self.instance.disks)
    _ApplyContainerMods("disk", disks, None, self.diskmod, None,
                        _PrepareDiskMod, None)
    utils.ValidateDeviceNames("disk", disks)
    if len(disks) > constants.MAX_DISKS:
      raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
                                 " more" % constants.MAX_DISKS,
                                 errors.ECODE_STATE)
    disk_sizes = [disk.size for disk in self.instance.disks]
    disk_sizes.extend(params["size"] for (op, idx, params, private) in
                      self.diskmod if op == constants.DDM_ADD)
    ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
    ispec[constants.ISPEC_DISK_SIZE] = disk_sizes

    if self.op.offline is not None and self.op.offline:
      CheckInstanceState(self, self.instance, CAN_CHANGE_INSTANCE_OFFLINE,
                         msg="can't change to offline")

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the instance list against the existing names.

    """
    assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    self.cluster = self.cfg.GetClusterInfo()

    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    pnode_uuid = self.instance.primary_node

    self.warn = []

    if (self.op.pnode_uuid is not None and self.op.pnode_uuid != pnode_uuid and
        not self.op.force):
      # verify that the instance is not up
      instance_info = self.rpc.call_instance_info(
          pnode_uuid, self.instance.name, self.instance.hypervisor,
          self.instance.hvparams)
      if instance_info.fail_msg:
        self.warn.append("Can't get instance runtime information: %s" %
                         instance_info.fail_msg)
      elif instance_info.payload:
        raise errors.OpPrereqError("Instance is still running on %s" %
                                   self.cfg.GetNodeName(pnode_uuid),
                                   errors.ECODE_STATE)

    assert pnode_uuid in self.owned_locks(locking.LEVEL_NODE)
    node_uuids = list(self.instance.all_nodes)
    pnode_info = self.cfg.GetNodeInfo(pnode_uuid)

    #_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
    assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
    group_info = self.cfg.GetNodeGroup(pnode_info.group)

    # dictionary with instance information after the modification
    ispec = {}

    # Prepare NIC modifications
    self.nicmod = _PrepareContainerMods(self.op.nics, _InstNicModPrivate)

    # OS change
    if self.op.os_name and not self.op.force:
      CheckNodeHasOS(self, self.instance.primary_node, self.op.os_name,
                     self.op.force_variant)
      instance_os = self.op.os_name
    else:
      instance_os = self.instance.os

    assert not (self.op.disk_template and self.op.disks), \
      "Can't modify disk template and apply disk changes at the same time"

    if self.op.disk_template:
      self._PreCheckDiskTemplate(pnode_info)

    self._PreCheckDisks(ispec)

    # hvparams processing
    if self.op.hvparams:
      hv_type = self.instance.hypervisor
      i_hvdict = GetUpdatedParams(self.instance.hvparams, self.op.hvparams)
      utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
      hv_new = self.cluster.SimpleFillHV(hv_type, self.instance.os, i_hvdict)

      # local check
      hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
      CheckHVParams(self, node_uuids, self.instance.hypervisor, hv_new)
      self.hv_proposed = self.hv_new = hv_new # the new actual values
      self.hv_inst = i_hvdict # the new dict (without defaults)
    else:
      self.hv_proposed = self.cluster.SimpleFillHV(self.instance.hypervisor,
                                                   self.instance.os,
                                                   self.instance.hvparams)
      self.hv_new = self.hv_inst = {}

    # beparams processing
    if self.op.beparams:
      i_bedict = GetUpdatedParams(self.instance.beparams, self.op.beparams,
                                  use_none=True)
      objects.UpgradeBeParams(i_bedict)
      utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
      be_new = self.cluster.SimpleFillBE(i_bedict)
      self.be_proposed = self.be_new = be_new # the new actual values
      self.be_inst = i_bedict # the new dict (without defaults)
    else:
      self.be_new = self.be_inst = {}
      self.be_proposed = self.cluster.SimpleFillBE(self.instance.beparams)
    be_old = self.cluster.FillBE(self.instance)

    # CPU param validation -- checking every time a parameter is
    # changed to cover all cases where either CPU mask or vcpus have
    # changed
    if (constants.BE_VCPUS in self.be_proposed and
        constants.HV_CPU_MASK in self.hv_proposed):
      cpu_list = \
        utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
      # Verify mask is consistent with number of vCPUs. Can skip this
      # test if only 1 entry in the CPU mask, which means same mask
      # is applied to all vCPUs.
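      # Illustrative example (syntax per utils.ParseMultiCpuMask, not a value
      # from this cluster): a mask of "1:2-3:4" yields three per-vCPU entries,
      # so BE_VCPUS would have to be 3, and every node would need at least
      # five physical CPUs (the highest requested CPU index plus one, checked
      # further below).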
      if (len(cpu_list) > 1 and
          len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
        raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
                                   " CPU mask [%s]" %
                                   (self.be_proposed[constants.BE_VCPUS],
                                    self.hv_proposed[constants.HV_CPU_MASK]),
                                   errors.ECODE_INVAL)

      # Only perform this test if a new CPU mask is given
      if constants.HV_CPU_MASK in self.hv_new:
        # Calculate the largest CPU number requested
        max_requested_cpu = max(map(max, cpu_list))
        # Check that all of the instance's nodes have enough physical CPUs to
        # satisfy the requested CPU mask
        hvspecs = [(self.instance.hypervisor,
                    self.cfg.GetClusterInfo()
                      .hvparams[self.instance.hypervisor])]
        _CheckNodesPhysicalCPUs(self, self.instance.all_nodes,
                                max_requested_cpu + 1,
                                hvspecs)

    # osparams processing
    if self.op.osparams:
      i_osdict = GetUpdatedParams(self.instance.osparams, self.op.osparams)
      CheckOSParams(self, True, node_uuids, instance_os, i_osdict)
      self.os_inst = i_osdict # the new dict (without defaults)
    else:
      self.os_inst = {}

    #TODO(dynmem): do the appropriate check involving MINMEM
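    # When MAXMEM is increased without --force, verify that the primary node
    # (and, if auto_balance is set, the secondary nodes) still have enough
    # free memory; if the primary node cannot be queried, only a warning is
    # recorded and the check is skipped.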
    if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
        be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
      mem_check_list = [pnode_uuid]
      if be_new[constants.BE_AUTO_BALANCE]:
        # either we changed auto_balance to yes or it was from before
        mem_check_list.extend(self.instance.secondary_nodes)
      instance_info = self.rpc.call_instance_info(
          pnode_uuid, self.instance.name, self.instance.hypervisor,
          self.instance.hvparams)
      hvspecs = [(self.instance.hypervisor,
                  self.cluster.hvparams[self.instance.hypervisor])]
      nodeinfo = self.rpc.call_node_info(mem_check_list, None,
                                         hvspecs)
      pninfo = nodeinfo[pnode_uuid]
      msg = pninfo.fail_msg
      if msg:
        # Assume the primary node is unreachable and go ahead
        self.warn.append("Can't get info from primary node %s: %s" %
                         (self.cfg.GetNodeName(pnode_uuid), msg))
      else:
        (_, _, (pnhvinfo, )) = pninfo.payload
        if not isinstance(pnhvinfo.get("memory_free", None), int):
          self.warn.append("Node data from primary node %s doesn't contain"
                           " free memory information" %
                           self.cfg.GetNodeName(pnode_uuid))
        elif instance_info.fail_msg:
          self.warn.append("Can't get instance runtime information: %s" %
                           instance_info.fail_msg)
        else:
          if instance_info.payload:
            current_mem = int(instance_info.payload["memory"])
          else:
            # Assume instance not running
            # (there is a slight race condition here, but it's not very
            # probable, and we have no other way to check)
            # TODO: Describe race condition
            current_mem = 0
          #TODO(dynmem): do the appropriate check involving MINMEM
          miss_mem = (be_new[constants.BE_MAXMEM] - current_mem -
                      pnhvinfo["memory_free"])
          if miss_mem > 0:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from starting, due to %d MB of memory"
                                       " missing on its primary node" %
                                       miss_mem, errors.ECODE_NORES)

      if be_new[constants.BE_AUTO_BALANCE]:
        for node_uuid, nres in nodeinfo.items():
          if node_uuid not in self.instance.secondary_nodes:
            continue
          nres.Raise("Can't get info from secondary node %s" %
                     self.cfg.GetNodeName(node_uuid), prereq=True,
                     ecode=errors.ECODE_STATE)
          (_, _, (nhvinfo, )) = nres.payload
          if not isinstance(nhvinfo.get("memory_free", None), int):
            raise errors.OpPrereqError("Secondary node %s didn't return free"
                                       " memory information" %
                                       self.cfg.GetNodeName(node_uuid),
                                       errors.ECODE_STATE)
          #TODO(dynmem): do the appropriate check involving MINMEM
          elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from failover to its secondary node"
                                       " %s, due to not enough memory" %
                                       self.cfg.GetNodeName(node_uuid),
                                       errors.ECODE_STATE)

    if self.op.runtime_mem:
      remote_info = self.rpc.call_instance_info(
         self.instance.primary_node, self.instance.name,
         self.instance.hypervisor,
         self.cluster.hvparams[self.instance.hypervisor])
      remote_info.Raise("Error checking node %s" %
                        self.cfg.GetNodeName(self.instance.primary_node))
      if not remote_info.payload: # not running already
        raise errors.OpPrereqError("Instance %s is not running" %
                                   self.instance.name, errors.ECODE_STATE)

      current_memory = remote_info.payload["memory"]
      if (not self.op.force and
           (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
            self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
        raise errors.OpPrereqError("Instance %s must have between %d"
                                   " and %d MB of memory unless --force is"
                                   " given" %
                                   (self.instance.name,
                                    self.be_proposed[constants.BE_MINMEM],
                                    self.be_proposed[constants.BE_MAXMEM]),
                                   errors.ECODE_INVAL)

      delta = self.op.runtime_mem - current_memory
      if delta > 0:
        CheckNodeFreeMemory(
            self, self.instance.primary_node,
            "ballooning memory for instance %s" % self.instance.name, delta,
            self.instance.hypervisor,
            self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])

    # make self.cluster visible in the functions below
    cluster = self.cluster

    def _PrepareNicCreate(_, params, private):
      self._PrepareNicModification(params, private, None, None,
                                   {}, cluster, pnode_uuid)
      return (None, None)

    def _PrepareNicMod(_, nic, params, private):
      self._PrepareNicModification(params, private, nic.ip, nic.network,
                                   nic.nicparams, cluster, pnode_uuid)
      return None

    def _PrepareNicRemove(_, params, __):
      ip = params.ip
      net = params.network
      if net is not None and ip is not None:
        self.cfg.ReleaseIp(net, ip, self.proc.GetECId())

    # Verify NIC changes (operating on copy)
    nics = self.instance.nics[:]
    _ApplyContainerMods("NIC", nics, None, self.nicmod,
                        _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
    if len(nics) > constants.MAX_NICS:
      raise errors.OpPrereqError("Instance has too many network interfaces"
                                 " (%d), cannot add more" % constants.MAX_NICS,
                                 errors.ECODE_STATE)

    # Pre-compute NIC changes (necessary to use result in hooks)
    self._nic_chgdesc = []
    if self.nicmod:
      # Operate on copies as this is still in prereq
      nics = [nic.Copy() for nic in self.instance.nics]
      _ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
                          self._CreateNewNic, self._ApplyNicMods, None)
      # Verify that NIC names are unique and valid
      utils.ValidateDeviceNames("NIC", nics)
      self._new_nics = nics
      ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
    else:
      self._new_nics = None
      ispec[constants.ISPEC_NIC_COUNT] = len(self.instance.nics)

    if not self.op.ignore_ipolicy:
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
                                                              group_info)

      # Fill ispec with backend parameters
      ispec[constants.ISPEC_SPINDLE_USE] = \
        self.be_new.get(constants.BE_SPINDLE_USE, None)
      ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS,
                                                         None)

      # Copy ispec to verify parameters with min/max values separately
      if self.op.disk_template:
        new_disk_template = self.op.disk_template
      else:
        new_disk_template = self.instance.disk_template
      ispec_max = ispec.copy()
      ispec_max[constants.ISPEC_MEM_SIZE] = \
        self.be_new.get(constants.BE_MAXMEM, None)
      res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,
                                                     new_disk_template)
      ispec_min = ispec.copy()
      ispec_min[constants.ISPEC_MEM_SIZE] = \
        self.be_new.get(constants.BE_MINMEM, None)
      res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,
                                                     new_disk_template)

      if (res_max or res_min):
        # FIXME: Improve error message by including information about whether
        # the upper or lower limit of the parameter fails the ipolicy.
        msg = ("Instance allocation to group %s (%s) violates policy: %s" %
               (group_info, group_info.name,
                utils.CommaJoin(set(res_max + res_min))))
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

  def _ConvertPlainToDrbd(self, feedback_fn):
    """Converts an instance from plain to drbd.

    """
    feedback_fn("Converting template to drbd")
    pnode_uuid = self.instance.primary_node
    snode_uuid = self.op.remote_node_uuid

    assert self.instance.disk_template == constants.DT_PLAIN

    # create a fake disk info for _GenerateDiskTemplate
    disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
                  constants.IDISK_VG: d.logical_id[0],
                  constants.IDISK_NAME: d.name}
                 for d in self.instance.disks]
    new_disks = GenerateDiskTemplate(self, self.op.disk_template,
                                     self.instance.uuid, pnode_uuid,
                                     [snode_uuid], disk_info, None, None, 0,
                                     feedback_fn, self.diskparams)
    anno_disks = rpc.AnnotateDiskParams(constants.DT_DRBD8, new_disks,
                                        self.diskparams)
    p_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, pnode_uuid)
    s_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, snode_uuid)
    info = GetInstanceInfoText(self.instance)
    feedback_fn("Creating additional volumes...")
    # first, create the missing data and meta devices
    for disk in anno_disks:
      # unfortunately this is... not too nice
      CreateSingleBlockDev(self, pnode_uuid, self.instance, disk.children[1],
                           info, True, p_excl_stor)
      for child in disk.children:
        CreateSingleBlockDev(self, snode_uuid, self.instance, child, info, True,
                             s_excl_stor)
    # at this stage, all new LVs have been created, we can rename the
    # old ones
    feedback_fn("Renaming original volumes...")
    rename_list = [(o, n.children[0].logical_id)
                   for (o, n) in zip(self.instance.disks, new_disks)]
    result = self.rpc.call_blockdev_rename(pnode_uuid, rename_list)
    result.Raise("Failed to rename original LVs")

    feedback_fn("Initializing DRBD devices...")
    # all child devices are in place, we can now create the DRBD devices
    try:
      for disk in anno_disks:
        for (node_uuid, excl_stor) in [(pnode_uuid, p_excl_stor),
                                       (snode_uuid, s_excl_stor)]:
          f_create = node_uuid == pnode_uuid
          CreateSingleBlockDev(self, node_uuid, self.instance, disk, info,
                               f_create, excl_stor)
    except errors.GenericError, e:
      feedback_fn("Initialization of DRBD devices failed;"
                  " renaming back original volumes...")
      for disk in new_disks:
        self.cfg.SetDiskID(disk, pnode_uuid)
      rename_back_list = [(n.children[0], o.logical_id)
                          for (n, o) in zip(new_disks, self.instance.disks)]
      result = self.rpc.call_blockdev_rename(pnode_uuid, rename_back_list)
      result.Raise("Failed to rename LVs back after error %s" % str(e))
      raise

    # at this point, the instance has been modified
    self.instance.disk_template = constants.DT_DRBD8
    self.instance.disks = new_disks
    self.cfg.Update(self.instance, feedback_fn)

    # Release node locks while waiting for sync
    ReleaseLocks(self, locking.LEVEL_NODE)

    # disks are created, waiting for sync
    disk_abort = not WaitForSync(self, self.instance,
                                 oneshot=not self.op.wait_for_sync)
    if disk_abort:
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance, please cleanup manually")

    # Node resource locks will be released by caller

  def _ConvertDrbdToPlain(self, feedback_fn):
    """Converts an instance from drbd to plain.

    """
    assert len(self.instance.secondary_nodes) == 1
    assert self.instance.disk_template == constants.DT_DRBD8

    pnode_uuid = self.instance.primary_node
    snode_uuid = self.instance.secondary_nodes[0]
    feedback_fn("Converting template to plain")

    old_disks = AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
    new_disks = [d.children[0] for d in self.instance.disks]

    # copy over size, mode and name
    for parent, child in zip(old_disks, new_disks):
      child.size = parent.size
      child.mode = parent.mode
      child.name = parent.name

    # this is a DRBD disk, return its port to the pool
    # NOTE: this must be done right before the call to cfg.Update!
    for disk in old_disks:
      tcp_port = disk.logical_id[2]
      self.cfg.AddTcpUdpPort(tcp_port)

    # update instance structure
    self.instance.disks = new_disks
    self.instance.disk_template = constants.DT_PLAIN
    _UpdateIvNames(0, self.instance.disks)
    self.cfg.Update(self.instance, feedback_fn)

    # Release locks in case removing disks takes a while
    ReleaseLocks(self, locking.LEVEL_NODE)

    feedback_fn("Removing volumes on the secondary node...")
    for disk in old_disks:
      self.cfg.SetDiskID(disk, snode_uuid)
      msg = self.rpc.call_blockdev_remove(snode_uuid, disk).fail_msg
      if msg:
        self.LogWarning("Could not remove block device %s on node %s,"
                        " continuing anyway: %s", disk.iv_name,
                        self.cfg.GetNodeName(snode_uuid), msg)

    feedback_fn("Removing unneeded volumes on the primary node...")
    for idx, disk in enumerate(old_disks):
      meta = disk.children[1]
      self.cfg.SetDiskID(meta, pnode_uuid)
      msg = self.rpc.call_blockdev_remove(pnode_uuid, meta).fail_msg
      if msg:
        self.LogWarning("Could not remove metadata for disk %d on node %s,"
                        " continuing anyway: %s", idx,
                        self.cfg.GetNodeName(pnode_uuid), msg)

  def _CreateNewDisk(self, idx, params, _):
    """Creates a new disk.

    """
    # add a new disk
    if self.instance.disk_template in constants.DTS_FILEBASED:
      (file_driver, file_path) = self.instance.disks[0].logical_id
      file_path = os.path.dirname(file_path)
    else:
      file_driver = file_path = None

    disk = \
      GenerateDiskTemplate(self, self.instance.disk_template,
                           self.instance.uuid, self.instance.primary_node,
                           self.instance.secondary_nodes, [params], file_path,
                           file_driver, idx, self.Log, self.diskparams)[0]

    new_disks = CreateDisks(self, self.instance, disks=[disk])

    if self.cluster.prealloc_wipe_disks:
      # Wipe new disk
      WipeOrCleanupDisks(self, self.instance,
                         disks=[(idx, disk, 0)],
                         cleanup=new_disks)

    return (disk, [
      ("disk/%d" % idx, "add:size=%s,mode=%s" % (disk.size, disk.mode)),
      ])

  @staticmethod
  def _ModifyDisk(idx, disk, params, _):
    """Modifies a disk.

    """
    changes = []
    mode = params.get(constants.IDISK_MODE, None)
    if mode:
      disk.mode = mode
      changes.append(("disk.mode/%d" % idx, disk.mode))

    name = params.get(constants.IDISK_NAME, None)
    disk.name = name
    changes.append(("disk.name/%d" % idx, disk.name))

    return changes

  def _RemoveDisk(self, idx, root, _):
    """Removes a disk.

    """
    (anno_disk,) = AnnotateDiskParams(self.instance, [root], self.cfg)
    for node_uuid, disk in anno_disk.ComputeNodeTree(
                             self.instance.primary_node):
      self.cfg.SetDiskID(disk, node_uuid)
      msg = self.rpc.call_blockdev_remove(node_uuid, disk).fail_msg
      if msg:
        self.LogWarning("Could not remove disk/%d on node '%s': %s,"
                        " continuing anyway", idx,
                        self.cfg.GetNodeName(node_uuid), msg)

    # if this is a DRBD disk, return its port to the pool
    if root.dev_type in constants.LDS_DRBD:
      self.cfg.AddTcpUdpPort(root.logical_id[2])

  def _CreateNewNic(self, idx, params, private):
    """Creates data structure for a new network interface.

    """
    mac = params[constants.INIC_MAC]
    ip = params.get(constants.INIC_IP, None)
    net = params.get(constants.INIC_NETWORK, None)
    name = params.get(constants.INIC_NAME, None)
    net_uuid = self.cfg.LookupNetwork(net)
    #TODO: not private.filled?? can a nic have no nicparams??
    nicparams = private.filled
    nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name,
                       nicparams=nicparams)
    nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())

    return (nobj, [
      ("nic.%d" % idx,
       "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
       (mac, ip, private.filled[constants.NIC_MODE],
       private.filled[constants.NIC_LINK],
       net)),
      ])

  def _ApplyNicMods(self, idx, nic, params, private):
    """Modifies a network interface.

    """
    changes = []

    for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:
      if key in params:
        changes.append(("nic.%s/%d" % (key, idx), params[key]))
        setattr(nic, key, params[key])

    new_net = params.get(constants.INIC_NETWORK, nic.network)
    new_net_uuid = self.cfg.LookupNetwork(new_net)
    if new_net_uuid != nic.network:
      changes.append(("nic.network/%d" % idx, new_net))
      nic.network = new_net_uuid

    if private.filled:
      nic.nicparams = private.filled

      for (key, val) in nic.nicparams.items():
        changes.append(("nic.%s/%d" % (key, idx), val))

    return changes

  def Exec(self, feedback_fn):
    """Modifies an instance.

    All parameters take effect only at the next restart of the instance.

    """
    # Process here the warnings from CheckPrereq, as we don't have a
    # feedback_fn there.
    # TODO: Replace with self.LogWarning
    for warn in self.warn:
      feedback_fn("WARNING: %s" % warn)

    assert ((self.op.disk_template is None) ^
            bool(self.owned_locks(locking.LEVEL_NODE_RES))), \
      "Not owning any node resource locks"

    result = []

    # New primary node
    if self.op.pnode_uuid:
      self.instance.primary_node = self.op.pnode_uuid

    # runtime memory
    if self.op.runtime_mem:
      rpcres = self.rpc.call_instance_balloon_memory(self.instance.primary_node,
                                                     self.instance,
                                                     self.op.runtime_mem)
      rpcres.Raise("Cannot modify instance runtime memory")
      result.append(("runtime_memory", self.op.runtime_mem))

    # Apply disk changes
    _ApplyContainerMods("disk", self.instance.disks, result, self.diskmod,
                        self._CreateNewDisk, self._ModifyDisk,
                        self._RemoveDisk)
    _UpdateIvNames(0, self.instance.disks)

    if self.op.disk_template:
      if __debug__:
        check_nodes = set(self.instance.all_nodes)
        if self.op.remote_node_uuid:
          check_nodes.add(self.op.remote_node_uuid)
        for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
          owned = self.owned_locks(level)
          assert not (check_nodes - owned), \
            ("Not owning the correct locks, owning %r, expected at least %r" %
             (owned, check_nodes))

      r_shut = ShutdownInstanceDisks(self, self.instance)
      if not r_shut:
        raise errors.OpExecError("Cannot shutdown instance disks, unable to"
                                 " proceed with disk template conversion")
      mode = (self.instance.disk_template, self.op.disk_template)
      try:
        self._DISK_CONVERSIONS[mode](self, feedback_fn)
      except:
        self.cfg.ReleaseDRBDMinors(self.instance.uuid)
        raise
      result.append(("disk_template", self.op.disk_template))

      assert self.instance.disk_template == self.op.disk_template, \
        ("Expected disk template '%s', found '%s'" %
         (self.op.disk_template, self.instance.disk_template))

    # Release node and resource locks if there are any (they might already have
    # been released during disk conversion)
    ReleaseLocks(self, locking.LEVEL_NODE)
    ReleaseLocks(self, locking.LEVEL_NODE_RES)

    # Apply NIC changes
    if self._new_nics is not None:
      self.instance.nics = self._new_nics
      result.extend(self._nic_chgdesc)

    # hvparams changes
    if self.op.hvparams:
      self.instance.hvparams = self.hv_inst
      for key, val in self.op.hvparams.iteritems():
        result.append(("hv/%s" % key, val))

    # beparams changes
    if self.op.beparams:
      self.instance.beparams = self.be_inst
      for key, val in self.op.beparams.iteritems():
        result.append(("be/%s" % key, val))

    # OS change
    if self.op.os_name:
      self.instance.os = self.op.os_name

    # osparams changes
    if self.op.osparams:
      self.instance.osparams = self.os_inst
      for key, val in self.op.osparams.iteritems():
        result.append(("os/%s" % key, val))

    if self.op.offline is None:
      # Ignore
      pass
    elif self.op.offline:
      # Mark instance as offline
      self.cfg.MarkInstanceOffline(self.instance.uuid)
      result.append(("admin_state", constants.ADMINST_OFFLINE))
    else:
      # Mark instance as online, but stopped
      self.cfg.MarkInstanceDown(self.instance.uuid)
      result.append(("admin_state", constants.ADMINST_DOWN))

    self.cfg.Update(self.instance, feedback_fn, self.proc.GetECId())

    assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
                self.owned_locks(locking.LEVEL_NODE)), \
      "All node locks should have been released by now"

    return result

  _DISK_CONVERSIONS = {
    (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
    (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
    }
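  # Illustrative example (command form assumed from the standard Ganeti CLI):
  #   gnt-instance modify -t drbd -n <new-secondary-node> <instance>
  # ends up dispatching to _ConvertPlainToDrbd through the mapping above.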


class LUInstanceChangeGroup(LogicalUnit):
  HPATH = "instance-change-group"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self.share_locks = ShareAll()

    self.needed_locks = {
      locking.LEVEL_NODEGROUP: [],
      locking.LEVEL_NODE: [],
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
      }

    self._ExpandAndLockInstance()

    if self.op.target_groups:
      self.req_target_uuids = map(self.cfg.LookupNodeGroup,
                                  self.op.target_groups)
    else:
      self.req_target_uuids = None

    self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]

      if self.req_target_uuids:
        lock_groups = set(self.req_target_uuids)

        # Lock all groups used by instance optimistically; this requires going
        # via the node before it's locked, requiring verification later on
        instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
        lock_groups.update(instance_groups)
      else:
        # No target groups, need to lock all of them
        lock_groups = locking.ALL_SET

      self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups

    elif level == locking.LEVEL_NODE:
      if self.req_target_uuids:
        # Lock all nodes used by instances
        self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
        self._LockInstancesNodes()

        # Lock all nodes in all potential target groups
        lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
                       self.cfg.GetInstanceNodeGroups(self.op.instance_uuid))
        member_nodes = [node_uuid
                        for group in lock_groups
                        for node_uuid in self.cfg.GetNodeGroup(group).members]
        self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
      else:
        # Lock all nodes as all groups are potential targets
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    owned_instance_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))

    assert (self.req_target_uuids is None or
            owned_groups.issuperset(self.req_target_uuids))
    assert owned_instance_names == set([self.op.instance_name])

    # Get instance information
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)

    # Check if node groups for locked instance are still correct
    assert owned_nodes.issuperset(self.instance.all_nodes), \
      ("Instance %s's nodes changed while we kept the lock" %
       self.op.instance_name)

    inst_groups = CheckInstanceNodeGroups(self.cfg, self.op.instance_uuid,
                                          owned_groups)

    if self.req_target_uuids:
      # User requested specific target groups
      self.target_uuids = frozenset(self.req_target_uuids)
    else:
      # All groups except those used by the instance are potential targets
      self.target_uuids = owned_groups - inst_groups

    conflicting_groups = self.target_uuids & inst_groups
    if conflicting_groups:
      raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
                                 " used by the instance '%s'" %
                                 (utils.CommaJoin(conflicting_groups),
                                  self.op.instance_name),
                                 errors.ECODE_INVAL)

    if not self.target_uuids:
      raise errors.OpPrereqError("There are no possible target groups",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    assert self.target_uuids

    env = {
      "TARGET_GROUPS": " ".join(self.target_uuids),
      }

    env.update(BuildInstanceHookEnvByObject(self, self.instance))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()
    return ([mn], [mn])

  def Exec(self, feedback_fn):
    instances = list(self.owned_locks(locking.LEVEL_INSTANCE))

    assert instances == [self.op.instance_name], "Instance not locked"
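
    # Ask the iallocator for a plan that moves the instance to one of the
    # target groups; the result is turned into regular jobs which are
    # returned to (and submitted by) the job queue via ResultWithJobs.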
    req = iallocator.IAReqGroupChange(instances=instances,
                                      target_groups=list(self.target_uuids))
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute solution for changing group of"
                                 " instance '%s' using iallocator '%s': %s" %
                                 (self.op.instance_name, self.op.iallocator,
                                  ial.info), errors.ECODE_NORES)

    jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False)

    self.LogInfo("Iallocator returned %s job(s) for changing group of"
                 " instance '%s'", len(jobs), self.op.instance_name)

    return ResultWithJobs(jobs)