#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Logical units dealing with instances."""

import OpenSSL
import copy
import logging
import os

from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import ht
from ganeti import hypervisor
from ganeti import locking
from ganeti.masterd import iallocator
from ganeti import masterd
from ganeti import netutils
from ganeti import objects
from ganeti import opcodes
from ganeti import pathutils
from ganeti import rpc
from ganeti import utils

from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs

from ganeti.cmdlib.common import INSTANCE_DOWN, \
  INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, CheckNodeOnline, \
  ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
  LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
  IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
  AnnotateDiskParams, GetUpdatedParams, ExpandInstanceUuidAndName, \
  ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeUuidAndName
from ganeti.cmdlib.instance_storage import CreateDisks, \
  CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, WaitForSync, \
  IsExclusiveStorageEnabledNodeUuid, CreateSingleBlockDev, ComputeDisks, \
  CheckRADOSFreeSpace, ComputeDiskSizePerVG, GenerateDiskTemplate, \
  StartInstanceDisks, ShutdownInstanceDisks, AssembleInstanceDisks, \
  CheckSpindlesExclusiveStorage
from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
  GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
  NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
  ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
  GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
  CheckInstanceBridgesExist, CheckNicsBridgesExist, CheckNodeHasOS

import ganeti.masterd.instance


#: Type description for changes as returned by L{_ApplyContainerMods}'s
#: callbacks
_TApplyContModsCbChanges = \
  ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
    ht.TNonEmptyString,
    ht.TAny,
    ])))
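
# For illustration only (values made up, not taken from the callbacks): a list
# such as [("mac", "aa:00:00:35:d0:47")] or the value None would satisfy the
# type check above.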


def _CheckHostnameSane(lu, name):
  """Ensures that a given hostname resolves to a 'sane' name.

  The given name is required to be a prefix of the resolved hostname,
  to prevent accidental mismatches.

  @param lu: the logical unit on behalf of which we're checking
  @param name: the name we should resolve and check
  @return: the resolved hostname object

  """
  hostname = netutils.GetHostname(name=name)
  if hostname.name != name:
    lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name)
  if not utils.MatchNameComponent(name, [hostname.name]):
    raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
                                " same as given hostname '%s'") %
                               (hostname.name, name), errors.ECODE_INVAL)
  return hostname
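
# Typical usage, as in LUInstanceCreate.CheckArguments below:
#   self.hostname = _CheckHostnameSane(self, self.op.instance_name)
#   self.op.instance_name = self.hostname.name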


def _CheckOpportunisticLocking(op):
  """Generate error if opportunistic locking is not possible.

  """
  if op.opportunistic_locking and not op.iallocator:
    raise errors.OpPrereqError("Opportunistic locking is only available in"
                               " combination with an instance allocator",
                               errors.ECODE_INVAL)


def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_name_whitelist):
  """Wrapper around IAReqInstanceAlloc.

  @param op: The instance opcode
  @param disks: The computed disks
  @param nics: The computed nics
  @param beparams: The fully filled beparams
  @param node_name_whitelist: List of nodes which should appear as online to the
    allocator (unless the node is already marked offline)

  @returns: A filled L{iallocator.IAReqInstanceAlloc}

  """
  spindle_use = beparams[constants.BE_SPINDLE_USE]
  return iallocator.IAReqInstanceAlloc(name=op.instance_name,
                                       disk_template=op.disk_template,
                                       tags=op.tags,
                                       os=op.os_type,
                                       vcpus=beparams[constants.BE_VCPUS],
                                       memory=beparams[constants.BE_MAXMEM],
                                       spindle_use=spindle_use,
                                       disks=disks,
                                       nics=[n.ToDict() for n in nics],
                                       hypervisor=op.hypervisor,
                                       node_whitelist=node_name_whitelist)


def _ComputeFullBeParams(op, cluster):
  """Computes the full beparams.

  @param op: The instance opcode
  @param cluster: The cluster config object

  @return: The fully filled beparams

  """
  default_beparams = cluster.beparams[constants.PP_DEFAULT]
  for param, value in op.beparams.iteritems():
    if value == constants.VALUE_AUTO:
      op.beparams[param] = default_beparams[param]
  objects.UpgradeBeParams(op.beparams)
  utils.ForceDictType(op.beparams, constants.BES_PARAMETER_TYPES)
  return cluster.SimpleFillBE(op.beparams)
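
# Sketch with hypothetical values: if op.beparams is {"vcpus": "auto",
# "maxmem": 512} and the cluster default for "vcpus" is 1, the "auto" entry
# is replaced by 1 before the dict is upgraded and merged via SimpleFillBE.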


def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
  """Computes the nics.

  @param op: The instance opcode
  @param cluster: Cluster configuration object
  @param default_ip: The default ip to assign
  @param cfg: An instance of the configuration object
  @param ec_id: Execution context ID

  @returns: The built-up NICs

  """
  nics = []
  for nic in op.nics:
    nic_mode_req = nic.get(constants.INIC_MODE, None)
    nic_mode = nic_mode_req
    if nic_mode is None or nic_mode == constants.VALUE_AUTO:
      nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]

    net = nic.get(constants.INIC_NETWORK, None)
    link = nic.get(constants.NIC_LINK, None)
    ip = nic.get(constants.INIC_IP, None)

    if net is None or net.lower() == constants.VALUE_NONE:
      net = None
    else:
      if nic_mode_req is not None or link is not None:
        raise errors.OpPrereqError("If network is given, no mode or link"
                                   " is allowed to be passed",
                                   errors.ECODE_INVAL)

    # ip validity checks
    if ip is None or ip.lower() == constants.VALUE_NONE:
      nic_ip = None
    elif ip.lower() == constants.VALUE_AUTO:
      if not op.name_check:
        raise errors.OpPrereqError("IP address set to auto but name checks"
                                   " have been skipped",
                                   errors.ECODE_INVAL)
      nic_ip = default_ip
    else:
      # We defer pool operations until later, so that the iallocator has
      # filled in the instance's node(s)
      if ip.lower() == constants.NIC_IP_POOL:
        if net is None:
          raise errors.OpPrereqError("if ip=pool, parameter network"
                                     " must be passed too",
                                     errors.ECODE_INVAL)

      elif not netutils.IPAddress.IsValid(ip):
        raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                   errors.ECODE_INVAL)

      nic_ip = ip

    # TODO: check the ip address for uniqueness
    if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
      raise errors.OpPrereqError("Routed nic mode requires an ip address",
                                 errors.ECODE_INVAL)

    # MAC address verification
    mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
    if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
      mac = utils.NormalizeAndValidateMac(mac)

      try:
        # TODO: We need to factor this out
        cfg.ReserveMAC(mac, ec_id)
      except errors.ReservationError:
        raise errors.OpPrereqError("MAC address %s already in use"
                                   " in cluster" % mac,
                                   errors.ECODE_NOTUNIQUE)

    #  Build nic parameters
    nicparams = {}
    if nic_mode_req:
      nicparams[constants.NIC_MODE] = nic_mode
    if link:
      nicparams[constants.NIC_LINK] = link

    check_params = cluster.SimpleFillNIC(nicparams)
    objects.NIC.CheckParameterSyntax(check_params)
    net_uuid = cfg.LookupNetwork(net)
    name = nic.get(constants.INIC_NAME, None)
    if name is not None and name.lower() == constants.VALUE_NONE:
      name = None
    nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name,
                          network=net_uuid, nicparams=nicparams)
    nic_obj.uuid = cfg.GenerateUniqueID(ec_id)
    nics.append(nic_obj)

  return nics
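
# A hypothetical op.nics entry handled above could look like
# {constants.INIC_IP: "pool", constants.INIC_NETWORK: "mynet"}: the NIC mode
# falls back to the cluster default and the actual pool lookup is deferred to
# LUInstanceCreate.CheckPrereq, once the primary node is known.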


def _CheckForConflictingIp(lu, ip, node_uuid):
  """Raise an error in case of a conflicting IP address.

  @type ip: string
  @param ip: IP address
  @type node_uuid: string
  @param node_uuid: node UUID

  """
  (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node_uuid)
  if conf_net is not None:
    raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
                                " network %s, but the target NIC does not." %
                                (ip, conf_net)),
                               errors.ECODE_STATE)

  return (None, None)


def _ComputeIPolicyInstanceSpecViolation(
  ipolicy, instance_spec, disk_template,
  _compute_fn=ComputeIPolicySpecViolation):
  """Compute if the instance spec meets the specs of the ipolicy.

  @type ipolicy: dict
  @param ipolicy: The ipolicy to verify against
  @type instance_spec: dict
  @param instance_spec: The instance spec to verify
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{ComputeIPolicySpecViolation}

  """
  mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
  cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
  disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
  disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
  nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
  spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)

  return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
                     disk_sizes, spindle_use, disk_template)
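
# The instance_spec consumed above is a plain dict with made-up values like
# {constants.ISPEC_MEM_SIZE: 512, constants.ISPEC_CPU_COUNT: 1,
#  constants.ISPEC_DISK_COUNT: 1, constants.ISPEC_DISK_SIZE: [1024],
#  constants.ISPEC_NIC_COUNT: 1, constants.ISPEC_SPINDLE_USE: 1}; see the
# ispec built in LUInstanceCreate.CheckPrereq below for the real thing.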


def _CheckOSVariant(os_obj, name):
  """Check whether an OS name conforms to the os variants specification.

  @type os_obj: L{objects.OS}
  @param os_obj: OS object to check
  @type name: string
  @param name: OS name passed by the user, to check for validity

  """
  variant = objects.OS.GetVariant(name)
  if not os_obj.supported_variants:
    if variant:
      raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
                                 " passed)" % (os_obj.name, variant),
                                 errors.ECODE_INVAL)
    return
  if not variant:
    raise errors.OpPrereqError("OS name must include a variant",
                               errors.ECODE_INVAL)

  if variant not in os_obj.supported_variants:
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)
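
# Illustration (assuming the usual "os+variant" naming): for a name such as
# "debootstrap+default", GetVariant yields "default", which must then appear
# in os_obj.supported_variants whenever the OS declares variants at all.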


class LUInstanceCreate(LogicalUnit):
  """Create an instance.

  """
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def CheckArguments(self):
    """Check arguments.

    """
    # do not require name_check to ease forward/backward compatibility
    # for tools
    if self.op.no_install and self.op.start:
      self.LogInfo("No-installation mode selected, disabling startup")
      self.op.start = False
    # validate/normalize the instance name
    self.op.instance_name = \
      netutils.Hostname.GetNormalizedName(self.op.instance_name)

    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("Cannot do IP address check without a name"
                                 " check", errors.ECODE_INVAL)

    # check nics' parameter names
    for nic in self.op.nics:
      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
    # check that NIC parameter names are unique and valid
    utils.ValidateDeviceNames("NIC", self.op.nics)

    # check that disk's names are unique and valid
    utils.ValidateDeviceNames("disk", self.op.disks)

    cluster = self.cfg.GetClusterInfo()
    if not self.op.disk_template in cluster.enabled_disk_templates:
      raise errors.OpPrereqError("Cannot create an instance with disk template"
                                 " '%s', because it is not enabled in the"
                                 " cluster. Enabled disk templates are: %s." %
                                 (self.op.disk_template,
                                  ",".join(cluster.enabled_disk_templates)))

    # check disks: parameter names and consistent adopt/no-adopt strategy
    has_adopt = has_no_adopt = False
    for disk in self.op.disks:
      if self.op.disk_template != constants.DT_EXT:
        utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
      if constants.IDISK_ADOPT in disk:
        has_adopt = True
      else:
        has_no_adopt = True
    if has_adopt and has_no_adopt:
      raise errors.OpPrereqError("Either all disks are adopted or none is",
                                 errors.ECODE_INVAL)
    if has_adopt:
      if self.op.disk_template not in constants.DTS_MAY_ADOPT:
        raise errors.OpPrereqError("Disk adoption is not supported for the"
                                   " '%s' disk template" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)
      if self.op.iallocator is not None:
        raise errors.OpPrereqError("Disk adoption not allowed with an"
                                   " iallocator script", errors.ECODE_INVAL)
      if self.op.mode == constants.INSTANCE_IMPORT:
        raise errors.OpPrereqError("Disk adoption not allowed for"
                                   " instance import", errors.ECODE_INVAL)
    else:
      if self.op.disk_template in constants.DTS_MUST_ADOPT:
        raise errors.OpPrereqError("Disk template %s requires disk adoption,"
                                   " but no 'adopt' parameter given" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)

    self.adopt_disks = has_adopt

    # instance name verification
    if self.op.name_check:
      self.hostname = _CheckHostnameSane(self, self.op.instance_name)
      self.op.instance_name = self.hostname.name
      # used in CheckPrereq for ip ping check
      self.check_ip = self.hostname.ip
    else:
      self.check_ip = None

    # file storage checks
    if (self.op.file_driver and
        not self.op.file_driver in constants.FILE_DRIVER):
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                 self.op.file_driver, errors.ECODE_INVAL)

    if self.op.disk_template == constants.DT_FILE:
      opcodes.RequireFileStorage()
    elif self.op.disk_template == constants.DT_SHARED_FILE:
      opcodes.RequireSharedFileStorage()

    ### Node/iallocator related checks
    CheckIAllocatorOrNode(self, "iallocator", "pnode")

    if self.op.pnode is not None:
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        if self.op.snode is None:
          raise errors.OpPrereqError("The networked disk templates need"
                                     " a mirror node", errors.ECODE_INVAL)
      elif self.op.snode:
        self.LogWarning("Secondary node will be ignored on non-mirrored disk"
                        " template")
        self.op.snode = None

    _CheckOpportunisticLocking(self.op)

    self._cds = GetClusterDomainSecret()

    if self.op.mode == constants.INSTANCE_IMPORT:
      # On import force_variant must be True, because if we forced it at
      # initial install, our only chance when importing it back is that it
      # works again!
      self.op.force_variant = True

      if self.op.no_install:
        self.LogInfo("No-installation mode has no effect during import")

    elif self.op.mode == constants.INSTANCE_CREATE:
      if self.op.os_type is None:
        raise errors.OpPrereqError("No guest OS specified",
                                   errors.ECODE_INVAL)
      if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
        raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
                                   " installation" % self.op.os_type,
                                   errors.ECODE_STATE)
      if self.op.disk_template is None:
        raise errors.OpPrereqError("No disk template specified",
                                   errors.ECODE_INVAL)

    elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
      # Check handshake to ensure both clusters have the same domain secret
      src_handshake = self.op.source_handshake
      if not src_handshake:
        raise errors.OpPrereqError("Missing source handshake",
                                   errors.ECODE_INVAL)

      errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
                                                           src_handshake)
      if errmsg:
        raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
                                   errors.ECODE_INVAL)

      # Load and check source CA
      self.source_x509_ca_pem = self.op.source_x509_ca
      if not self.source_x509_ca_pem:
        raise errors.OpPrereqError("Missing source X509 CA",
                                   errors.ECODE_INVAL)

      try:
        (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
                                                    self._cds)
      except OpenSSL.crypto.Error, err:
        raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
                                   (err, ), errors.ECODE_INVAL)

      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
      if errcode is not None:
        raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
                                   errors.ECODE_INVAL)

      self.source_x509_ca = cert

      src_instance_name = self.op.source_instance_name
      if not src_instance_name:
        raise errors.OpPrereqError("Missing source instance name",
                                   errors.ECODE_INVAL)

      self.source_instance_name = \
        netutils.GetHostname(name=src_instance_name).name

    else:
      raise errors.OpPrereqError("Invalid instance creation mode %r" %
                                 self.op.mode, errors.ECODE_INVAL)

  def ExpandNames(self):
    """ExpandNames for CreateInstance.

    Figure out the right locks for instance creation.

    """
    self.needed_locks = {}

    # this is just a preventive check, but someone might still add this
    # instance in the meantime, and creation will fail at lock-add time
    if self.op.instance_name in\
      [inst.name for inst in self.cfg.GetAllInstancesInfo().values()]:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 self.op.instance_name, errors.ECODE_EXISTS)

    self.add_locks[locking.LEVEL_INSTANCE] = self.op.instance_name

    if self.op.iallocator:
      # TODO: Find a solution to not lock all nodes in the cluster, e.g. by
      # specifying a group on instance creation and then selecting nodes from
      # that group
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
      self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET

      if self.op.opportunistic_locking:
        self.opportunistic_locks[locking.LEVEL_NODE] = True
        self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
    else:
      (self.op.pnode_uuid, self.op.pnode) = \
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
      nodelist = [self.op.pnode_uuid]
      if self.op.snode is not None:
        (self.op.snode_uuid, self.op.snode) = \
          ExpandNodeUuidAndName(self.cfg, self.op.snode_uuid, self.op.snode)
        nodelist.append(self.op.snode_uuid)
      self.needed_locks[locking.LEVEL_NODE] = nodelist

    # in case of import lock the source node too
    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = self.op.src_node
      src_path = self.op.src_path

      if src_path is None:
        self.op.src_path = src_path = self.op.instance_name

      if src_node is None:
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
        self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
        self.op.src_node = None
        if os.path.isabs(src_path):
          raise errors.OpPrereqError("Importing an instance from a path"
                                     " requires a source node option",
                                     errors.ECODE_INVAL)
      else:
        (self.op.src_node_uuid, self.op.src_node) = (_, src_node) = \
          ExpandNodeUuidAndName(self.cfg, self.op.src_node_uuid, src_node)
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
          self.needed_locks[locking.LEVEL_NODE].append(self.op.src_node_uuid)
        if not os.path.isabs(src_path):
          self.op.src_path = src_path = \
            utils.PathJoin(pathutils.EXPORT_DIR, src_path)

    self.needed_locks[locking.LEVEL_NODE_RES] = \
      CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def _RunAllocator(self):
    """Run the allocator based on input opcode.

    """
    if self.op.opportunistic_locking:
      # Only consider nodes for which a lock is held
      node_name_whitelist = self.cfg.GetNodeNames(
        self.owned_locks(locking.LEVEL_NODE))
    else:
      node_name_whitelist = None

    #TODO Export network to iallocator so that it chooses a pnode
    #     in a nodegroup that has the desired network connected to
    req = _CreateInstanceAllocRequest(self.op, self.disks,
                                      self.nics, self.be_full,
                                      node_name_whitelist)
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      # When opportunistic locks are used only a temporary failure is generated
      if self.op.opportunistic_locking:
        ecode = errors.ECODE_TEMP_NORES
      else:
        ecode = errors.ECODE_NORES

      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" %
                                 (self.op.iallocator, ial.info),
                                 ecode)

    (self.op.pnode_uuid, self.op.pnode) = \
      ExpandNodeUuidAndName(self.cfg, None, ial.result[0])
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                 self.op.instance_name, self.op.iallocator,
                 utils.CommaJoin(ial.result))

    assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator"

    if req.RequiredNodes() == 2:
      (self.op.snode_uuid, self.op.snode) = \
        ExpandNodeUuidAndName(self.cfg, None, ial.result[1])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "ADD_MODE": self.op.mode,
      }
    if self.op.mode == constants.INSTANCE_IMPORT:
      env["SRC_NODE"] = self.op.src_node
      env["SRC_PATH"] = self.op.src_path
      env["SRC_IMAGES"] = self.src_images

    env.update(BuildInstanceHookEnv(
      name=self.op.instance_name,
      primary_node_name=self.op.pnode,
      secondary_node_names=self.cfg.GetNodeNames(self.secondaries),
      status=self.op.start,
      os_type=self.op.os_type,
      minmem=self.be_full[constants.BE_MINMEM],
      maxmem=self.be_full[constants.BE_MAXMEM],
      vcpus=self.be_full[constants.BE_VCPUS],
      nics=NICListToTuple(self, self.nics),
      disk_template=self.op.disk_template,
      disks=[(d[constants.IDISK_NAME], d.get("uuid", ""),
              d[constants.IDISK_SIZE], d[constants.IDISK_MODE])
             for d in self.disks],
      bep=self.be_full,
      hvp=self.hv_full,
      hypervisor_name=self.op.hypervisor,
      tags=self.op.tags,
      ))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode(), self.op.pnode_uuid] + self.secondaries
    return nl, nl

  def _ReadExportInfo(self):
    """Reads the export information from disk.

    It will override the opcode source node and path with the actual
    information, if these two were not specified before.

    @return: the export information

    """
    assert self.op.mode == constants.INSTANCE_IMPORT

    if self.op.src_node_uuid is None:
      locked_nodes = self.owned_locks(locking.LEVEL_NODE)
      exp_list = self.rpc.call_export_list(locked_nodes)
      found = False
      for node in exp_list:
        if exp_list[node].fail_msg:
          continue
        if self.op.src_path in exp_list[node].payload:
          found = True
          self.op.src_node = node
          self.op.src_node_uuid = self.cfg.GetNodeInfoByName(node).uuid
          self.op.src_path = utils.PathJoin(pathutils.EXPORT_DIR,
                                            self.op.src_path)
          break
      if not found:
        raise errors.OpPrereqError("No export found for relative path %s" %
                                   self.op.src_path, errors.ECODE_INVAL)

    CheckNodeOnline(self, self.op.src_node_uuid)
    result = self.rpc.call_export_info(self.op.src_node_uuid, self.op.src_path)
    result.Raise("No export or invalid export found in dir %s" %
                 self.op.src_path)

    export_info = objects.SerializableConfigParser.Loads(str(result.payload))
    if not export_info.has_section(constants.INISECT_EXP):
      raise errors.ProgrammerError("Corrupted export config",
                                   errors.ECODE_ENVIRON)

    ei_version = export_info.get(constants.INISECT_EXP, "version")
    if int(ei_version) != constants.EXPORT_VERSION:
      raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                 (ei_version, constants.EXPORT_VERSION),
                                 errors.ECODE_ENVIRON)
    return export_info

  def _ReadExportParams(self, einfo):
    """Use export parameters as defaults.

    In case the opcode doesn't specify (as in override) some instance
    parameters, then try to use them from the export information, if
    that declares them.

    """
    self.op.os_type = einfo.get(constants.INISECT_EXP, "os")

    if self.op.disk_template is None:
      if einfo.has_option(constants.INISECT_INS, "disk_template"):
        self.op.disk_template = einfo.get(constants.INISECT_INS,
                                          "disk_template")
        if self.op.disk_template not in constants.DISK_TEMPLATES:
          raise errors.OpPrereqError("Disk template specified in configuration"
                                     " file is not one of the allowed values:"
                                     " %s" %
                                     " ".join(constants.DISK_TEMPLATES),
                                     errors.ECODE_INVAL)
      else:
        raise errors.OpPrereqError("No disk template specified and the export"
                                   " is missing the disk_template information",
                                   errors.ECODE_INVAL)

    if not self.op.disks:
      disks = []
      # TODO: import the disk iv_name too
      for idx in range(constants.MAX_DISKS):
        if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
          disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
          disks.append({constants.IDISK_SIZE: disk_sz})
      self.op.disks = disks
      if not disks and self.op.disk_template != constants.DT_DISKLESS:
        raise errors.OpPrereqError("No disk info specified and the export"
                                   " is missing the disk information",
                                   errors.ECODE_INVAL)

    if not self.op.nics:
      nics = []
      for idx in range(constants.MAX_NICS):
        if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
          ndict = {}
          for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
            v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
            ndict[name] = v
          nics.append(ndict)
        else:
          break
      self.op.nics = nics

    if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
      self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()

    if (self.op.hypervisor is None and
        einfo.has_option(constants.INISECT_INS, "hypervisor")):
      self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")

    if einfo.has_section(constants.INISECT_HYP):
      # use the export parameters but do not override the ones
      # specified by the user
      for name, value in einfo.items(constants.INISECT_HYP):
        if name not in self.op.hvparams:
          self.op.hvparams[name] = value

    if einfo.has_section(constants.INISECT_BEP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_BEP):
        if name not in self.op.beparams:
          self.op.beparams[name] = value
        # Compatibility for the old "memory" be param
        if name == constants.BE_MEMORY:
          if constants.BE_MAXMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MAXMEM] = value
          if constants.BE_MINMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MINMEM] = value
    else:
      # try to read the parameters old style, from the main section
      for name in constants.BES_PARAMETERS:
        if (name not in self.op.beparams and
            einfo.has_option(constants.INISECT_INS, name)):
          self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)

    if einfo.has_section(constants.INISECT_OSP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_OSP):
        if name not in self.op.osparams:
          self.op.osparams[name] = value

  def _RevertToDefaults(self, cluster):
    """Revert the instance parameters to the default values.

    """
    # hvparams
    hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
    for name in self.op.hvparams.keys():
      if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
        del self.op.hvparams[name]
    # beparams
    be_defs = cluster.SimpleFillBE({})
    for name in self.op.beparams.keys():
      if name in be_defs and be_defs[name] == self.op.beparams[name]:
        del self.op.beparams[name]
    # nic params
    nic_defs = cluster.SimpleFillNIC({})
    for nic in self.op.nics:
      for name in constants.NICS_PARAMETERS:
        if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
          del nic[name]
    # osparams
    os_defs = cluster.SimpleFillOS(self.op.os_type, {})
    for name in self.op.osparams.keys():
      if name in os_defs and os_defs[name] == self.op.osparams[name]:
        del self.op.osparams[name]

  def _CalculateFileStorageDir(self):
    """Calculate final instance file storage dir.

    """
    # file storage dir calculation/check
    self.instance_file_storage_dir = None
    if self.op.disk_template in constants.DTS_FILEBASED:
      # build the full file storage dir path
      joinargs = []

      if self.op.disk_template == constants.DT_SHARED_FILE:
        get_fsd_fn = self.cfg.GetSharedFileStorageDir
      else:
        get_fsd_fn = self.cfg.GetFileStorageDir

      cfg_storagedir = get_fsd_fn()
      if not cfg_storagedir:
        raise errors.OpPrereqError("Cluster file storage dir not defined",
                                   errors.ECODE_STATE)
      joinargs.append(cfg_storagedir)

      if self.op.file_storage_dir is not None:
        joinargs.append(self.op.file_storage_dir)

      joinargs.append(self.op.instance_name)

      # pylint: disable=W0142
      self.instance_file_storage_dir = utils.PathJoin(*joinargs)

  def CheckPrereq(self): # pylint: disable=R0914
    """Check prerequisites.

    """
    self._CalculateFileStorageDir()

    if self.op.mode == constants.INSTANCE_IMPORT:
      export_info = self._ReadExportInfo()
      self._ReadExportParams(export_info)
      self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
    else:
      self._old_instance_name = None

    if (not self.cfg.GetVGName() and
        self.op.disk_template not in constants.DTS_NOT_LVM):
      raise errors.OpPrereqError("Cluster does not support lvm-based"
                                 " instances", errors.ECODE_STATE)

    if (self.op.hypervisor is None or
        self.op.hypervisor == constants.VALUE_AUTO):
      self.op.hypervisor = self.cfg.GetHypervisorType()

    cluster = self.cfg.GetClusterInfo()
    enabled_hvs = cluster.enabled_hypervisors
    if self.op.hypervisor not in enabled_hvs:
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
                                 " cluster (%s)" %
                                 (self.op.hypervisor, ",".join(enabled_hvs)),
                                 errors.ECODE_STATE)

    # Check tag validity
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)

    # check hypervisor parameter syntax (locally)
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
    filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
                                      self.op.hvparams)
    hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
    hv_type.CheckParameterSyntax(filled_hvp)
    self.hv_full = filled_hvp
    # check that we don't specify global parameters on an instance
    CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
                         "instance", "cluster")

    # fill and remember the beparams dict
    self.be_full = _ComputeFullBeParams(self.op, cluster)

    # build os parameters
    self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)

    # now that hvp/bep are in final format, let's reset to defaults,
    # if told to do so
    if self.op.identify_defaults:
      self._RevertToDefaults(cluster)

    # NIC buildup
    self.nics = _ComputeNics(self.op, cluster, self.check_ip, self.cfg,
                             self.proc.GetECId())

    # disk checks/pre-build
    default_vg = self.cfg.GetVGName()
    self.disks = ComputeDisks(self.op, default_vg)

    if self.op.mode == constants.INSTANCE_IMPORT:
      disk_images = []
      for idx in range(len(self.disks)):
        option = "disk%d_dump" % idx
        if export_info.has_option(constants.INISECT_INS, option):
          # FIXME: are the old os-es, disk sizes, etc. useful?
          export_name = export_info.get(constants.INISECT_INS, option)
          image = utils.PathJoin(self.op.src_path, export_name)
          disk_images.append(image)
        else:
          disk_images.append(False)

      self.src_images = disk_images

      if self.op.instance_name == self._old_instance_name:
        for idx, nic in enumerate(self.nics):
          if nic.mac == constants.VALUE_AUTO:
            nic_mac_ini = "nic%d_mac" % idx
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)

    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT

    # ip ping checks (we use the same ip that was resolved in ExpandNames)
    if self.op.ip_check:
      if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (self.check_ip, self.op.instance_name),
                                   errors.ECODE_NOTUNIQUE)

    #### mac address generation
    # By generating here the mac address both the allocator and the hooks get
    # the real final mac address rather than the 'auto' or 'generate' value.
    # There is a race condition between the generation and the instance object
    # creation, which means that we know the mac is valid now, but we're not
    # sure it will be when we actually add the instance. If things go bad
    # adding the instance will abort because of a duplicate mac, and the
    # creation job will fail.
    for nic in self.nics:
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId())

    #### allocator run

    if self.op.iallocator is not None:
      self._RunAllocator()

    # Release all unneeded node locks
    keep_locks = filter(None, [self.op.pnode_uuid, self.op.snode_uuid,
                               self.op.src_node_uuid])
    ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES)), \
      "Node locks differ from node resource locks"

    #### node related checks

    # check primary node
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode_uuid)
    assert self.pnode is not None, \
      "Cannot retrieve locked node %s" % self.op.pnode_uuid
    if pnode.offline:
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if pnode.drained:
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if not pnode.vm_capable:
      raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
                                 " '%s'" % pnode.name, errors.ECODE_STATE)

    self.secondaries = []

    # Fill in any IPs from IP pools. This must happen here, because we need to
    # know the nic's primary node, as specified by the iallocator
    for idx, nic in enumerate(self.nics):
      net_uuid = nic.network
      if net_uuid is not None:
        nobj = self.cfg.GetNetwork(net_uuid)
        netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.uuid)
        if netparams is None:
          raise errors.OpPrereqError("No netparams found for network"
                                     " %s. Probably not connected to"
                                     " node's %s nodegroup" %
                                     (nobj.name, self.pnode.name),
                                     errors.ECODE_INVAL)
        self.LogInfo("NIC/%d inherits netparams %s" %
                     (idx, netparams.values()))
        nic.nicparams = dict(netparams)
        if nic.ip is not None:
          if nic.ip.lower() == constants.NIC_IP_POOL:
            try:
              nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
                                         " from the address pool" % idx,
                                         errors.ECODE_STATE)
            self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name)
          else:
            try:
              self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("IP address %s already in use"
                                         " or does not belong to network %s" %
                                         (nic.ip, nobj.name),
                                         errors.ECODE_NOTUNIQUE)

      # net is None, ip None or given
      elif self.op.conflicts_check:
        _CheckForConflictingIp(self, nic.ip, self.pnode.uuid)

    # mirror node verification
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.snode_uuid == pnode.uuid:
        raise errors.OpPrereqError("The secondary node cannot be the"
                                   " primary node", errors.ECODE_INVAL)
      CheckNodeOnline(self, self.op.snode_uuid)
      CheckNodeNotDrained(self, self.op.snode_uuid)
      CheckNodeVmCapable(self, self.op.snode_uuid)
      self.secondaries.append(self.op.snode_uuid)

      snode = self.cfg.GetNodeInfo(self.op.snode_uuid)
      if pnode.group != snode.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    nodes = [pnode]
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      nodes.append(snode)
    has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
    excl_stor = compat.any(map(has_es, nodes))
    if excl_stor and not self.op.disk_template in constants.DTS_EXCL_STORAGE:
      raise errors.OpPrereqError("Disk template %s not supported with"
                                 " exclusive storage" % self.op.disk_template,
                                 errors.ECODE_STATE)
    for disk in self.disks:
      CheckSpindlesExclusiveStorage(disk, excl_stor, True)

    node_uuids = [pnode.uuid] + self.secondaries

    if not self.adopt_disks:
      if self.op.disk_template == constants.DT_RBD:
        # _CheckRADOSFreeSpace() is just a placeholder.
        # Any function that checks prerequisites can be placed here.
        # Check if there is enough space on the RADOS cluster.
        CheckRADOSFreeSpace()
      elif self.op.disk_template == constants.DT_EXT:
        # FIXME: Function that checks prereqs if needed
        pass
      elif self.op.disk_template in utils.GetLvmDiskTemplates():
        # Check lv size requirements, if not adopting
        req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
        CheckNodesFreeDiskPerVG(self, node_uuids, req_sizes)
      else:
        # FIXME: add checks for other, non-adopting, non-lvm disk templates
        pass

    elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
      all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
                                disk[constants.IDISK_ADOPT])
                     for disk in self.disks])
      if len(all_lvs) != len(self.disks):
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
                                   errors.ECODE_INVAL)
      for lv_name in all_lvs:
        try:
          # FIXME: lv_name here is "vg/lv" need to ensure that other calls
          # to ReserveLV use the same syntax
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("LV named %s used by another instance" %
                                     lv_name, errors.ECODE_NOTUNIQUE)

      vg_names = self.rpc.call_vg_list([pnode.uuid])[pnode.uuid]
      vg_names.Raise("Cannot get VG information from node %s" % pnode.name)

      node_lvs = self.rpc.call_lv_list([pnode.uuid],
                                       vg_names.payload.keys())[pnode.uuid]
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
      node_lvs = node_lvs.payload

      delta = all_lvs.difference(node_lvs.keys())
      if delta:
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
      if online_lvs:
        raise errors.OpPrereqError("Online logical volumes found, cannot"
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
                                   errors.ECODE_STATE)
      # update the size of disk based on what is found
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
                                        dsk[constants.IDISK_ADOPT])][0]))

    elif self.op.disk_template == constants.DT_BLOCK:
      # Normalize and de-duplicate device paths
      all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
                       for disk in self.disks])
      if len(all_disks) != len(self.disks):
        raise errors.OpPrereqError("Duplicate disk names given for adoption",
                                   errors.ECODE_INVAL)
      baddisks = [d for d in all_disks
                  if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
      if baddisks:
        raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
                                   " cannot be adopted" %
                                   (utils.CommaJoin(baddisks),
                                    constants.ADOPTABLE_BLOCKDEV_ROOT),
                                   errors.ECODE_INVAL)

      node_disks = self.rpc.call_bdev_sizes([pnode.uuid],
                                            list(all_disks))[pnode.uuid]
      node_disks.Raise("Cannot get block device information from node %s" %
                       pnode.name)
      node_disks = node_disks.payload
      delta = all_disks.difference(node_disks.keys())
      if delta:
        raise errors.OpPrereqError("Missing block device(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_disks[dsk[constants.IDISK_ADOPT]]))

    # Verify instance specs
    spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
    ispec = {
      constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
      constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
      constants.ISPEC_DISK_COUNT: len(self.disks),
      constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE]
                                  for disk in self.disks],
      constants.ISPEC_NIC_COUNT: len(self.nics),
      constants.ISPEC_SPINDLE_USE: spindle_use,
      }

    group_info = self.cfg.GetNodeGroup(pnode.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
    res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
                                               self.op.disk_template)
    if not self.op.ignore_ipolicy and res:
      msg = ("Instance allocation to group %s (%s) violates policy: %s" %
             (pnode.group, group_info.name, utils.CommaJoin(res)))
      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

    CheckHVParams(self, node_uuids, self.op.hypervisor, self.op.hvparams)

    CheckNodeHasOS(self, pnode.uuid, self.op.os_type, self.op.force_variant)
    # check OS parameters (remotely)
    CheckOSParams(self, True, node_uuids, self.op.os_type, self.os_full)

    CheckNicsBridgesExist(self, self.nics, self.pnode.uuid)

    #TODO: _CheckExtParams (remotely)
    # Check parameters for extstorage

    # memory check on primary node
    #TODO(dynmem): use MINMEM for checking
    if self.op.start:
      hvfull = objects.FillDict(cluster.hvparams.get(self.op.hypervisor, {}),
                                self.op.hvparams)
      CheckNodeFreeMemory(self, self.pnode.uuid,
                          "creating instance %s" % self.op.instance_name,
                          self.be_full[constants.BE_MAXMEM],
                          self.op.hypervisor, hvfull)

    self.dry_run_result = list(node_uuids)
1176

    
1177
  def Exec(self, feedback_fn):
1178
    """Create and add the instance to the cluster.
1179

1180
    """
1181
    assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
1182
                self.owned_locks(locking.LEVEL_NODE)), \
1183
      "Node locks differ from node resource locks"
1184
    assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)
1185

    
1186
    ht_kind = self.op.hypervisor
1187
    if ht_kind in constants.HTS_REQ_PORT:
1188
      network_port = self.cfg.AllocatePort()
1189
    else:
1190
      network_port = None
1191

    
1192
    instance_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
1193

    
1194
    # This is ugly but we got a chicken-egg problem here
1195
    # We can only take the group disk parameters, as the instance
1196
    # has no disks yet (we are generating them right here).
1197
    nodegroup = self.cfg.GetNodeGroup(self.pnode.group)
1198
    disks = GenerateDiskTemplate(self,
1199
                                 self.op.disk_template,
1200
                                 instance_uuid, self.pnode.uuid,
1201
                                 self.secondaries,
1202
                                 self.disks,
1203
                                 self.instance_file_storage_dir,
1204
                                 self.op.file_driver,
1205
                                 0,
1206
                                 feedback_fn,
1207
                                 self.cfg.GetGroupDiskParams(nodegroup))
1208

    
1209
    iobj = objects.Instance(name=self.op.instance_name,
1210
                            uuid=instance_uuid,
1211
                            os=self.op.os_type,
1212
                            primary_node=self.pnode.uuid,
1213
                            nics=self.nics, disks=disks,
1214
                            disk_template=self.op.disk_template,
1215
                            disks_active=False,
1216
                            admin_state=constants.ADMINST_DOWN,
1217
                            network_port=network_port,
1218
                            beparams=self.op.beparams,
1219
                            hvparams=self.op.hvparams,
1220
                            hypervisor=self.op.hypervisor,
1221
                            osparams=self.op.osparams,
1222
                            )
1223

    
1224
    if self.op.tags:
1225
      for tag in self.op.tags:
1226
        iobj.AddTag(tag)
1227

    
1228
    if self.adopt_disks:
1229
      if self.op.disk_template == constants.DT_PLAIN:
1230
        # rename LVs to the newly-generated names; we need to construct
1231
        # 'fake' LV disks with the old data, plus the new unique_id
1232
        tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
1233
        rename_to = []
        for t_dsk, a_dsk in zip(tmp_disks, self.disks):
          rename_to.append(t_dsk.logical_id)
          t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
          self.cfg.SetDiskID(t_dsk, self.pnode.uuid)
        result = self.rpc.call_blockdev_rename(self.pnode.uuid,
                                               zip(tmp_disks, rename_to))
        result.Raise("Failed to rename adopted LVs")
    else:
      feedback_fn("* creating instance disks...")
      try:
        CreateDisks(self, iobj)
      except errors.OpExecError:
        self.LogWarning("Device creation failed")
        self.cfg.ReleaseDRBDMinors(self.op.instance_name)
        raise

    feedback_fn("adding instance %s to cluster config" % self.op.instance_name)

    self.cfg.AddInstance(iobj, self.proc.GetECId())

    # Declare that we don't want to remove the instance lock anymore, as we've
    # added the instance to the config
    del self.remove_locks[locking.LEVEL_INSTANCE]

    if self.op.mode == constants.INSTANCE_IMPORT:
      # Release unused nodes
      ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node_uuid])
    else:
      # Release all nodes
      ReleaseLocks(self, locking.LEVEL_NODE)

    disk_abort = False
    if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
      feedback_fn("* wiping instance disks...")
      try:
        WipeDisks(self, iobj)
      except errors.OpExecError, err:
        logging.exception("Wiping disks failed")
        self.LogWarning("Wiping instance disks failed (%s)", err)
        disk_abort = True

    if disk_abort:
      # Something is already wrong with the disks, don't do anything else
      pass
    elif self.op.wait_for_sync:
      disk_abort = not WaitForSync(self, iobj)
    elif iobj.disk_template in constants.DTS_INT_MIRROR:
      # make sure the disks are not degraded (still sync-ing is ok)
      feedback_fn("* checking mirrors status")
      disk_abort = not WaitForSync(self, iobj, oneshot=True)
    else:
      disk_abort = False
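    # Summary of the cases above: with wait_for_sync we block until the
    # mirrors are fully synced; for internally mirrored templates we only do
    # a one-shot check that nothing is degraded; otherwise no waiting is
    # needed.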

    if disk_abort:
      RemoveDisks(self, iobj)
      self.cfg.RemoveInstance(iobj.uuid)
      # Make sure the instance lock gets removed
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance")

    # instance disks are now active
    iobj.disks_active = True

    # Release all node resource locks
    ReleaseLocks(self, locking.LEVEL_NODE_RES)

    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
      # we need to set the disks ID to the primary node, since the
      # preceding code might or might not have done it, depending on
      # disk template and other options
      for disk in iobj.disks:
        self.cfg.SetDiskID(disk, self.pnode.uuid)
      if self.op.mode == constants.INSTANCE_CREATE:
        if not self.op.no_install:
          pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
                        not self.op.wait_for_sync)
          if pause_sync:
            feedback_fn("* pausing disk sync to install instance OS")
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
                                                              (iobj.disks,
                                                               iobj), True)
            for idx, success in enumerate(result.payload):
              if not success:
                logging.warn("pause-sync of instance %s for disk %d failed",
                             self.op.instance_name, idx)

          feedback_fn("* running the instance OS create scripts...")
          # FIXME: pass debug option from opcode to backend
          os_add_result = \
            self.rpc.call_instance_os_add(self.pnode.uuid, (iobj, None), False,
                                          self.op.debug_level)
          if pause_sync:
            feedback_fn("* resuming disk sync")
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
                                                              (iobj.disks,
                                                               iobj), False)
            for idx, success in enumerate(result.payload):
              if not success:
                logging.warn("resume-sync of instance %s for disk %d failed",
                             self.op.instance_name, idx)

          os_add_result.Raise("Could not add os for instance %s"
                              " on node %s" % (self.op.instance_name,
                                               self.pnode.name))

      else:
        if self.op.mode == constants.INSTANCE_IMPORT:
          feedback_fn("* running the instance OS import scripts...")

          transfers = []

          for idx, image in enumerate(self.src_images):
            if not image:
              continue

            # FIXME: pass debug option from opcode to backend
            dt = masterd.instance.DiskTransfer("disk/%s" % idx,
                                               constants.IEIO_FILE, (image, ),
                                               constants.IEIO_SCRIPT,
                                               (iobj.disks[idx], idx),
                                               None)
            transfers.append(dt)

          import_result = \
            masterd.instance.TransferInstanceData(self, feedback_fn,
                                                  self.op.src_node_uuid,
                                                  self.pnode.uuid,
                                                  self.pnode.secondary_ip,
                                                  iobj, transfers)
          if not compat.all(import_result):
            self.LogWarning("Some disks for instance %s on node %s were not"
                            " imported successfully" % (self.op.instance_name,
                                                        self.pnode.name))

          rename_from = self._old_instance_name

        elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
          feedback_fn("* preparing remote import...")
          # The source cluster will stop the instance before attempting to make
          # a connection. In some cases stopping an instance can take a long
          # time, hence the shutdown timeout is added to the connection
          # timeout.
          connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
                             self.op.source_shutdown_timeout)
          timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)

          assert iobj.primary_node == self.pnode.uuid
          disk_results = \
            masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
                                          self.source_x509_ca,
                                          self._cds, timeouts)
          if not compat.all(disk_results):
            # TODO: Should the instance still be started, even if some disks
            # failed to import (valid for local imports, too)?
            self.LogWarning("Some disks for instance %s on node %s were not"
                            " imported successfully" % (self.op.instance_name,
                                                        self.pnode.name))

          rename_from = self.source_instance_name

        else:
          # also checked in the prereq part
          raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
                                       % self.op.mode)

        # Run rename script on newly imported instance
        assert iobj.name == self.op.instance_name
        feedback_fn("Running rename script for %s" % self.op.instance_name)
        result = self.rpc.call_instance_run_rename(self.pnode.uuid, iobj,
                                                   rename_from,
                                                   self.op.debug_level)
        result.Warn("Failed to run rename script for %s on node %s" %
                    (self.op.instance_name, self.pnode.name), self.LogWarning)

    assert not self.owned_locks(locking.LEVEL_NODE_RES)

    if self.op.start:
      iobj.admin_state = constants.ADMINST_UP
      self.cfg.Update(iobj, feedback_fn)
      logging.info("Starting instance %s on node %s", self.op.instance_name,
                   self.pnode.name)
      feedback_fn("* starting instance...")
      result = self.rpc.call_instance_start(self.pnode.uuid, (iobj, None, None),
                                            False, self.op.reason)
      result.Raise("Could not start instance")

    return list(iobj.all_nodes)


class LUInstanceRename(LogicalUnit):
  """Rename an instance.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE

  def CheckArguments(self):
    """Check arguments.

    """
    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("IP address check requires a name check",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = BuildInstanceHookEnvByObject(self, self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    (self.op.instance_uuid, self.op.instance_name) = \
      ExpandInstanceUuidAndName(self.cfg, self.op.instance_uuid,
                                self.op.instance_name)
    instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    assert instance is not None
    CheckNodeOnline(self, instance.primary_node)
    CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
                       msg="cannot rename")
    self.instance = instance

    new_name = self.op.new_name
    if self.op.name_check:
      hostname = _CheckHostnameSane(self, new_name)
      new_name = self.op.new_name = hostname.name
      if (self.op.ip_check and
          netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (hostname.ip, new_name),
                                   errors.ECODE_NOTUNIQUE)

    instance_names = [inst.name for
                      inst in self.cfg.GetAllInstancesInfo().values()]
    if new_name in instance_names and new_name != instance.name:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 new_name, errors.ECODE_EXISTS)

  def Exec(self, feedback_fn):
    """Rename the instance.

    """
    old_name = self.instance.name

    rename_file_storage = False
    if (self.instance.disk_template in constants.DTS_FILEBASED and
        self.op.new_name != self.instance.name):
      old_file_storage_dir = os.path.dirname(
                               self.instance.disks[0].logical_id[1])
      rename_file_storage = True

    self.cfg.RenameInstance(self.instance.uuid, self.op.new_name)
    # Change the instance lock. This is definitely safe while we hold the BGL.
    # Otherwise the new lock would have to be added in acquired mode.
    assert self.REQ_BGL
    assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER)
    self.glm.remove(locking.LEVEL_INSTANCE, old_name)
    self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)

    # re-read the instance from the configuration after rename
    renamed_inst = self.cfg.GetInstanceInfo(self.instance.uuid)

    if rename_file_storage:
      new_file_storage_dir = os.path.dirname(
                               renamed_inst.disks[0].logical_id[1])
      result = self.rpc.call_file_storage_dir_rename(renamed_inst.primary_node,
                                                     old_file_storage_dir,
                                                     new_file_storage_dir)
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
                   " (but the instance has been renamed in Ganeti)" %
                   (self.cfg.GetNodeName(renamed_inst.primary_node),
                    old_file_storage_dir, new_file_storage_dir))

    StartInstanceDisks(self, renamed_inst, None)
    # update info on disks
    info = GetInstanceInfoText(renamed_inst)
    for (idx, disk) in enumerate(renamed_inst.disks):
      for node_uuid in renamed_inst.all_nodes:
        self.cfg.SetDiskID(disk, node_uuid)
        result = self.rpc.call_blockdev_setinfo(node_uuid, disk, info)
        result.Warn("Error setting info on node %s for disk %s" %
                    (self.cfg.GetNodeName(node_uuid), idx), self.LogWarning)
    try:
      result = self.rpc.call_instance_run_rename(renamed_inst.primary_node,
                                                 renamed_inst, old_name,
                                                 self.op.debug_level)
      result.Warn("Could not run OS rename script for instance %s on node %s"
                  " (but the instance has been renamed in Ganeti)" %
                  (renamed_inst.name,
                   self.cfg.GetNodeName(renamed_inst.primary_node)),
                  self.LogWarning)
    finally:
      ShutdownInstanceDisks(self, renamed_inst)

    return renamed_inst.name


class LUInstanceRemove(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()
    elif level == locking.LEVEL_NODE_RES:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = BuildInstanceHookEnvByObject(self, self.instance)
    env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()]
    nl_post = list(self.instance.all_nodes) + nl
    return (nl, nl_post)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Remove the instance.

    """
    logging.info("Shutting down instance %s on node %s", self.instance.name,
                 self.cfg.GetNodeName(self.instance.primary_node))

    result = self.rpc.call_instance_shutdown(self.instance.primary_node,
                                             self.instance,
                                             self.op.shutdown_timeout,
                                             self.op.reason)
    if self.op.ignore_failures:
      result.Warn("Warning: can't shutdown instance", feedback_fn)
    else:
      result.Raise("Could not shutdown instance %s on node %s" %
                   (self.instance.name,
                    self.cfg.GetNodeName(self.instance.primary_node)))

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES))
    assert not (set(self.instance.all_nodes) -
                self.owned_locks(locking.LEVEL_NODE)), \
      "Not owning correct locks"

    RemoveInstance(self, feedback_fn, self.instance, self.op.ignore_failures)


class LUInstanceMove(LogicalUnit):
  """Move an instance by data-copying.

  """
  HPATH = "instance-move"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    (self.op.target_node_uuid, self.op.target_node) = \
      ExpandNodeUuidAndName(self.cfg, self.op.target_node_uuid,
                            self.op.target_node)
    self.needed_locks[locking.LEVEL_NODE] = [self.op.target_node]
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes(primary_only=True)
    elif level == locking.LEVEL_NODE_RES:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "TARGET_NODE": self.op.target_node,
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
      }
    env.update(BuildInstanceHookEnvByObject(self, self.instance))
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [
      self.cfg.GetMasterNode(),
      self.instance.primary_node,
      self.op.target_node_uuid,
      ]
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    if self.instance.disk_template not in constants.DTS_COPYABLE:
      raise errors.OpPrereqError("Disk template %s not suitable for copying" %
                                 self.instance.disk_template,
                                 errors.ECODE_STATE)

    target_node = self.cfg.GetNodeInfo(self.op.target_node_uuid)
    assert target_node is not None, \
      "Cannot retrieve locked node %s" % self.op.target_node

    self.target_node_uuid = target_node.uuid
    if target_node.uuid == self.instance.primary_node:
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
                                 (self.instance.name, target_node.name),
                                 errors.ECODE_STATE)

    bep = self.cfg.GetClusterInfo().FillBE(self.instance)

    for idx, dsk in enumerate(self.instance.disks):
      if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
                                   " cannot copy" % idx, errors.ECODE_STATE)

    CheckNodeOnline(self, target_node.uuid)
    CheckNodeNotDrained(self, target_node.uuid)
    CheckNodeVmCapable(self, target_node.uuid)
    cluster = self.cfg.GetClusterInfo()
    group_info = self.cfg.GetNodeGroup(target_node.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
    CheckTargetNodeIPolicy(self, ipolicy, self.instance, target_node, self.cfg,
                           ignore=self.op.ignore_ipolicy)

    if self.instance.admin_state == constants.ADMINST_UP:
      # check memory requirements on the target node
      CheckNodeFreeMemory(
          self, target_node.uuid, "failing over instance %s" %
          self.instance.name, bep[constants.BE_MAXMEM],
          self.instance.hypervisor,
          self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])
    else:
      self.LogInfo("Not checking memory on the target node as"
                   " instance will not be started")

    # check bridge existence
    CheckInstanceBridgesExist(self, self.instance, node_uuid=target_node.uuid)

  def Exec(self, feedback_fn):
    """Move an instance.

    The move is done by shutting it down on its present node, copying
    the data over (slow) and starting it on the new node.

    """
    source_node = self.cfg.GetNodeInfo(self.instance.primary_node)
    target_node = self.cfg.GetNodeInfo(self.target_node_uuid)

    self.LogInfo("Shutting down instance %s on source node %s",
                 self.instance.name, source_node.name)

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES))

    result = self.rpc.call_instance_shutdown(source_node.uuid, self.instance,
                                             self.op.shutdown_timeout,
                                             self.op.reason)
    if self.op.ignore_consistency:
      result.Warn("Could not shutdown instance %s on node %s. Proceeding"
                  " anyway. Please make sure node %s is down. Error details" %
                  (self.instance.name, source_node.name, source_node.name),
                  self.LogWarning)
    else:
      result.Raise("Could not shutdown instance %s on node %s" %
                   (self.instance.name, source_node.name))

    # create the target disks
    try:
      CreateDisks(self, self.instance, target_node_uuid=target_node.uuid)
    except errors.OpExecError:
      self.LogWarning("Device creation failed")
      self.cfg.ReleaseDRBDMinors(self.instance.uuid)
      raise

    cluster_name = self.cfg.GetClusterInfo().cluster_name

    errs = []
    # activate, get path, copy the data over
    for idx, disk in enumerate(self.instance.disks):
      self.LogInfo("Copying data for disk %d", idx)
      result = self.rpc.call_blockdev_assemble(
                 target_node.uuid, (disk, self.instance), self.instance.name,
                 True, idx)
      if result.fail_msg:
        self.LogWarning("Can't assemble newly created disk %d: %s",
                        idx, result.fail_msg)
        errs.append(result.fail_msg)
        break
      dev_path = result.payload
      result = self.rpc.call_blockdev_export(source_node.uuid, (disk,
                                                                self.instance),
                                             target_node.name, dev_path,
                                             cluster_name)
      if result.fail_msg:
        self.LogWarning("Can't copy data over for disk %d: %s",
                        idx, result.fail_msg)
        errs.append(result.fail_msg)
        break
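    # Each disk is assembled on the target node and then filled by exporting
    # the data from the source node over the cluster network (dev_path is the
    # block device to write into); any failure above aborts the loop early.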

    if errs:
      self.LogWarning("Some disks failed to copy, aborting")
      try:
        RemoveDisks(self, self.instance, target_node_uuid=target_node.uuid)
      finally:
        self.cfg.ReleaseDRBDMinors(self.instance.uuid)
        raise errors.OpExecError("Errors during disk copy: %s" %
                                 (",".join(errs),))

    self.instance.primary_node = target_node.uuid
    self.cfg.Update(self.instance, feedback_fn)

    self.LogInfo("Removing the disks on the original node")
    RemoveDisks(self, self.instance, target_node_uuid=source_node.uuid)

    # Only start the instance if it's marked as up
    if self.instance.admin_state == constants.ADMINST_UP:
      self.LogInfo("Starting instance %s on node %s",
                   self.instance.name, target_node.name)

      disks_ok, _ = AssembleInstanceDisks(self, self.instance,
                                          ignore_secondaries=True)
      if not disks_ok:
        ShutdownInstanceDisks(self, self.instance)
        raise errors.OpExecError("Can't activate the instance's disks")

      result = self.rpc.call_instance_start(target_node.uuid,
                                            (self.instance, None, None), False,
                                            self.op.reason)
      msg = result.fail_msg
      if msg:
        ShutdownInstanceDisks(self, self.instance)
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
                                 (self.instance.name, target_node.name, msg))


class LUInstanceMultiAlloc(NoHooksLU):
  """Allocates multiple instances at the same time.

  """
  REQ_BGL = False

  def CheckArguments(self):
    """Check arguments.

    """
    nodes = []
    for inst in self.op.instances:
      if inst.iallocator is not None:
        raise errors.OpPrereqError("iallocator is not allowed to be set on"
                                   " instance objects", errors.ECODE_INVAL)
      nodes.append(bool(inst.pnode))
      if inst.disk_template in constants.DTS_INT_MIRROR:
        nodes.append(bool(inst.snode))

    has_nodes = compat.any(nodes)
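    # The check below rejects mixed requests: either every instance in the
    # opcode names its nodes (pnode, plus snode for mirrored templates) or
    # none of them does, since all(nodes) ^ any(nodes) is true exactly when
    # only some of them do.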
    if compat.all(nodes) ^ has_nodes:
      raise errors.OpPrereqError("There are instance objects providing"
                                 " pnode/snode while others do not",
                                 errors.ECODE_INVAL)

    if self.op.iallocator is None:
      default_iallocator = self.cfg.GetDefaultIAllocator()
      if default_iallocator and has_nodes:
        self.op.iallocator = default_iallocator
      else:
        raise errors.OpPrereqError("No iallocator or nodes on the instances"
                                   " given and no cluster-wide default"
                                   " iallocator found; please specify either"
                                   " an iallocator or nodes on the instances"
                                   " or set a cluster-wide default iallocator",
                                   errors.ECODE_INVAL)

    _CheckOpportunisticLocking(self.op)

    dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
    if dups:
      raise errors.OpPrereqError("There are duplicate instance names: %s" %
                                 utils.CommaJoin(dups), errors.ECODE_INVAL)

  def ExpandNames(self):
    """Calculate the locks.

    """
    self.share_locks = ShareAll()
    self.needed_locks = {
      # iallocator will select nodes and even if no iallocator is used,
      # collisions with LUInstanceCreate should be avoided
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
      }

    if self.op.iallocator:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
      self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET

      if self.op.opportunistic_locking:
        self.opportunistic_locks[locking.LEVEL_NODE] = True
        self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
    else:
      nodeslist = []
      for inst in self.op.instances:
        (inst.pnode_uuid, inst.pnode) = \
          ExpandNodeUuidAndName(self.cfg, inst.pnode_uuid, inst.pnode)
        nodeslist.append(inst.pnode)
        if inst.snode is not None:
          (inst.snode_uuid, inst.snode) = \
            ExpandNodeUuidAndName(self.cfg, inst.snode_uuid, inst.snode)
          nodeslist.append(inst.snode)

      self.needed_locks[locking.LEVEL_NODE] = nodeslist
      # Lock resources of instance's primary and secondary nodes (copy to
      # prevent accidental modification)
      self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)

  def CheckPrereq(self):
    """Check prerequisite.

    """
    cluster = self.cfg.GetClusterInfo()
    default_vg = self.cfg.GetVGName()
    ec_id = self.proc.GetECId()

    if self.op.opportunistic_locking:
      # Only consider nodes for which a lock is held
      node_whitelist = self.cfg.GetNodeNames(
                         list(self.owned_locks(locking.LEVEL_NODE)))
    else:
      node_whitelist = None

    insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
                                         _ComputeNics(op, cluster, None,
                                                      self.cfg, ec_id),
                                         _ComputeFullBeParams(op, cluster),
                                         node_whitelist)
             for op in self.op.instances]

    req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" %
                                 (self.op.iallocator, ial.info),
                                 errors.ECODE_NORES)

    self.ia_result = ial.result

    if self.op.dry_run:
      self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
        constants.JOB_IDS_KEY: [],
        })

  def _ConstructPartialResult(self):
    """Constructs the partial result.

    """
    (allocatable, failed) = self.ia_result
    return {
      opcodes.OpInstanceMultiAlloc.ALLOCATABLE_KEY:
        map(compat.fst, allocatable),
      opcodes.OpInstanceMultiAlloc.FAILED_KEY: failed,
      }
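    # The iallocator result is a pair: the 'allocatable' entries are
    # (instance name, chosen node names) tuples, of which only the names are
    # returned here, while 'failed' holds the names that could not be placed.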

  def Exec(self, feedback_fn):
    """Executes the opcode.

    """
    op2inst = dict((op.instance_name, op) for op in self.op.instances)
    (allocatable, failed) = self.ia_result

    jobs = []
    for (name, node_names) in allocatable:
      op = op2inst.pop(name)

      (op.pnode_uuid, op.pnode) = \
        ExpandNodeUuidAndName(self.cfg, None, node_names[0])
      if len(node_names) > 1:
        (op.snode_uuid, op.snode) = \
          ExpandNodeUuidAndName(self.cfg, None, node_names[1])

      jobs.append([op])

    missing = set(op2inst.keys()) - set(failed)
    assert not missing, \
      "Iallocator returned an incomplete result: %s" % utils.CommaJoin(missing)

    return ResultWithJobs(jobs, **self._ConstructPartialResult())


class _InstNicModPrivate:
  """Data structure for network interface modifications.

  Used by L{LUInstanceSetParams}.

  """
  def __init__(self):
    self.params = None
    self.filled = None


def _PrepareContainerMods(mods, private_fn):
  """Prepares a list of container modifications by adding a private data field.

  @type mods: list of tuples; (operation, index, parameters)
  @param mods: List of modifications
  @type private_fn: callable or None
  @param private_fn: Callable for constructing a private data field for a
    modification
  @rtype: list

  """
  if private_fn is None:
    fn = lambda: None
  else:
    fn = private_fn

  return [(op, idx, params, fn()) for (op, idx, params) in mods]
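# Illustrative example (hypothetical values): with
#   mods = [(constants.DDM_ADD, -1, {"size": 1024})]
# and private_fn=_InstNicModPrivate, the result is
#   [(constants.DDM_ADD, -1, {"size": 1024}, <_InstNicModPrivate instance>)],
# i.e. each 3-tuple simply gains a per-modification private object.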


def _CheckNodesPhysicalCPUs(lu, node_uuids, requested, hypervisor_specs):
  """Checks if nodes have enough physical CPUs.

  This function checks if all given nodes have the needed number of
  physical CPUs. In case any node has fewer CPUs or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type node_uuids: C{list}
  @param node_uuids: the list of node UUIDs to check
  @type requested: C{int}
  @param requested: the minimum acceptable number of physical CPUs
  @type hypervisor_specs: list of pairs (string, dict of strings)
  @param hypervisor_specs: list of hypervisor specifications in
      pairs (hypervisor_name, hvparams)
  @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
      or we cannot check the node

  """
  nodeinfo = lu.rpc.call_node_info(node_uuids, None, hypervisor_specs, None)
  for node_uuid in node_uuids:
    info = nodeinfo[node_uuid]
    node_name = lu.cfg.GetNodeName(node_uuid)
    info.Raise("Cannot get current information from node %s" % node_name,
               prereq=True, ecode=errors.ECODE_ENVIRON)
    (_, _, (hv_info, )) = info.payload
    num_cpus = hv_info.get("cpu_total", None)
    if not isinstance(num_cpus, int):
      raise errors.OpPrereqError("Can't compute the number of physical CPUs"
                                 " on node %s, result was '%s'" %
                                 (node_name, num_cpus), errors.ECODE_ENVIRON)
    if requested > num_cpus:
      raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
                                 "required" % (node_name, num_cpus, requested),
                                 errors.ECODE_NORES)
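# For example, requesting 8 physical CPUs while a node reports cpu_total=4
# (or reports no usable value at all) makes this function raise
# OpPrereqError for that node.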


def GetItemFromContainer(identifier, kind, container):
  """Return the item referred to by the identifier.

  @type identifier: string
  @param identifier: Item index or name or UUID
  @type kind: string
  @param kind: One-word item description
  @type container: list
  @param container: Container to get the item from

  """
  # Index
  try:
    idx = int(identifier)
    if idx == -1:
      # Append
      absidx = len(container) - 1
    elif idx < 0:
      raise IndexError("Not accepting negative indices other than -1")
    elif idx > len(container):
      raise IndexError("Got %s index %s, but there are only %s" %
                       (kind, idx, len(container)))
    else:
      absidx = idx
    return (absidx, container[idx])
  except ValueError:
    pass

  for idx, item in enumerate(container):
    if item.uuid == identifier or item.name == identifier:
      return (idx, item)

  raise errors.OpPrereqError("Cannot find %s with identifier %s" %
                             (kind, identifier), errors.ECODE_NOENT)
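# Illustrative example (hypothetical container): with disks = [d0, d1, d2],
# GetItemFromContainer("1", "disk", disks) returns (1, d1); passing d1's name
# or UUID instead returns the same pair, and "-1" refers to the last item.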


def _ApplyContainerMods(kind, container, chgdesc, mods,
                        create_fn, modify_fn, remove_fn):
  """Applies descriptions in C{mods} to C{container}.

  @type kind: string
  @param kind: One-word item description
  @type container: list
  @param container: Container to modify
  @type chgdesc: None or list
  @param chgdesc: List of applied changes
  @type mods: list
  @param mods: Modifications as returned by L{_PrepareContainerMods}
  @type create_fn: callable
  @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
    receives absolute item index, parameters and private data object as added
    by L{_PrepareContainerMods}, returns tuple containing new item and changes
    as list
  @type modify_fn: callable
  @param modify_fn: Callback for modifying an existing item
    (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
    and private data object as added by L{_PrepareContainerMods}, returns
    changes as list
  @type remove_fn: callable
  @param remove_fn: Callback on removing item; receives absolute item index,
    item and private data object as added by L{_PrepareContainerMods}

  """
  for (op, identifier, params, private) in mods:
    changes = None

    if op == constants.DDM_ADD:
      # Calculate where item will be added
      # When adding an item, identifier can only be an index
      try:
        idx = int(identifier)
      except ValueError:
        raise errors.OpPrereqError("Only positive integer or -1 is accepted as"
                                   " identifier for %s" % constants.DDM_ADD,
                                   errors.ECODE_INVAL)
      if idx == -1:
        addidx = len(container)
      else:
        if idx < 0:
          raise IndexError("Not accepting negative indices other than -1")
        elif idx > len(container):
          raise IndexError("Got %s index %s, but there are only %s" %
                           (kind, idx, len(container)))
        addidx = idx

      if create_fn is None:
        item = params
      else:
        (item, changes) = create_fn(addidx, params, private)

      if idx == -1:
        container.append(item)
      else:
        assert idx >= 0
        assert idx <= len(container)
        # list.insert does so before the specified index
        container.insert(idx, item)
    else:
      # Retrieve existing item
      (absidx, item) = GetItemFromContainer(identifier, kind, container)

      if op == constants.DDM_REMOVE:
        assert not params

        if remove_fn is not None:
          remove_fn(absidx, item, private)

        changes = [("%s/%s" % (kind, absidx), "remove")]

        assert container[absidx] == item
        del container[absidx]
      elif op == constants.DDM_MODIFY:
        if modify_fn is not None:
          changes = modify_fn(absidx, item, params, private)
      else:
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)

    assert _TApplyContModsCbChanges(changes)

    if not (chgdesc is None or changes is None):
      chgdesc.extend(changes)
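# Illustrative example (hypothetical values): applying
#   mods = [(constants.DDM_REMOVE, "0", {}, None)]
# with kind="disk" appends ("disk/0", "remove") to chgdesc and deletes the
# first disk from the container; add/modify entries are handled through the
# create_fn/modify_fn callbacks instead.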


def _UpdateIvNames(base_index, disks):
  """Updates the C{iv_name} attribute of disks.

  @type disks: list of L{objects.Disk}

  """
  for (idx, disk) in enumerate(disks):
    disk.iv_name = "disk/%s" % (base_index + idx, )
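# For example, calling _UpdateIvNames(1, disks) on a two-element disk list
# sets their iv_name attributes to "disk/1" and "disk/2" respectively.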


class LUInstanceSetParams(LogicalUnit):
  """Modifies an instance's parameters.

  """
  HPATH = "instance-modify"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  @staticmethod
  def _UpgradeDiskNicMods(kind, mods, verify_fn):
    assert ht.TList(mods)
    assert not mods or len(mods[0]) in (2, 3)

    if mods and len(mods[0]) == 2:
      result = []

      addremove = 0
      for op, params in mods:
        if op in (constants.DDM_ADD, constants.DDM_REMOVE):
          result.append((op, -1, params))
          addremove += 1

          if addremove > 1:
            raise errors.OpPrereqError("Only one %s add or remove operation is"
                                       " supported at a time" % kind,
                                       errors.ECODE_INVAL)
        else:
          result.append((constants.DDM_MODIFY, op, params))

      assert verify_fn(result)
    else:
      result = mods

    return result
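  # Illustrative example (hypothetical values): the legacy 2-tuple form
  #   [(constants.DDM_ADD, {"size": 1024})]
  # becomes [(constants.DDM_ADD, -1, {"size": 1024})], while an index-keyed
  # entry such as [("0", {"mode": "ro"})] becomes
  #   [(constants.DDM_MODIFY, "0", {"mode": "ro"})].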

  @staticmethod
  def _CheckMods(kind, mods, key_types, item_fn):
    """Ensures requested disk/NIC modifications are valid.

    """
    for (op, _, params) in mods:
      assert ht.TDict(params)

      # If 'key_types' is an empty dict, we assume we have an
      # 'ext' template and thus do not ForceDictType
      if key_types:
        utils.ForceDictType(params, key_types)

      if op == constants.DDM_REMOVE:
        if params:
          raise errors.OpPrereqError("No settings should be passed when"
                                     " removing a %s" % kind,
                                     errors.ECODE_INVAL)
      elif op in (constants.DDM_ADD, constants.DDM_MODIFY):
        item_fn(op, params)
      else:
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)

  @staticmethod
  def _VerifyDiskModification(op, params, excl_stor):
    """Verifies a disk modification.

    """
    if op == constants.DDM_ADD:
      mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
      if mode not in constants.DISK_ACCESS_SET:
        raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
                                   errors.ECODE_INVAL)

      size = params.get(constants.IDISK_SIZE, None)
      if size is None:
        raise errors.OpPrereqError("Required disk parameter '%s' missing" %
                                   constants.IDISK_SIZE, errors.ECODE_INVAL)

      try:
        size = int(size)
      except (TypeError, ValueError), err:
        raise errors.OpPrereqError("Invalid disk size parameter: %s" % err,
                                   errors.ECODE_INVAL)

      params[constants.IDISK_SIZE] = size
      name = params.get(constants.IDISK_NAME, None)
      if name is not None and name.lower() == constants.VALUE_NONE:
        params[constants.IDISK_NAME] = None

      CheckSpindlesExclusiveStorage(params, excl_stor, True)

    elif op == constants.DDM_MODIFY:
      if constants.IDISK_SIZE in params:
        raise errors.OpPrereqError("Disk size change not possible, use"
                                   " grow-disk", errors.ECODE_INVAL)
      if len(params) > 2:
        raise errors.OpPrereqError("Disk modification doesn't support"
                                   " additional arbitrary parameters",
                                   errors.ECODE_INVAL)
      name = params.get(constants.IDISK_NAME, None)
      if name is not None and name.lower() == constants.VALUE_NONE:
        params[constants.IDISK_NAME] = None

  @staticmethod
  def _VerifyNicModification(op, params):
    """Verifies a network interface modification.

    """
    if op in (constants.DDM_ADD, constants.DDM_MODIFY):
      ip = params.get(constants.INIC_IP, None)
      name = params.get(constants.INIC_NAME, None)
      req_net = params.get(constants.INIC_NETWORK, None)
      link = params.get(constants.NIC_LINK, None)
      mode = params.get(constants.NIC_MODE, None)
      if name is not None and name.lower() == constants.VALUE_NONE:
        params[constants.INIC_NAME] = None
      if req_net is not None:
        if req_net.lower() == constants.VALUE_NONE:
          params[constants.INIC_NETWORK] = None
          req_net = None
        elif link is not None or mode is not None:
          raise errors.OpPrereqError("If a network is given, mode or link"
                                     " should not be set",
                                     errors.ECODE_INVAL)

      if op == constants.DDM_ADD:
        macaddr = params.get(constants.INIC_MAC, None)
        if macaddr is None:
          params[constants.INIC_MAC] = constants.VALUE_AUTO

      if ip is not None:
        if ip.lower() == constants.VALUE_NONE:
          params[constants.INIC_IP] = None
        else:
          if ip.lower() == constants.NIC_IP_POOL:
            if op == constants.DDM_ADD and req_net is None:
              raise errors.OpPrereqError("If ip=pool, parameter network"
                                         " cannot be none",
                                         errors.ECODE_INVAL)
          else:
            if not netutils.IPAddress.IsValid(ip):
              raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                         errors.ECODE_INVAL)

      if constants.INIC_MAC in params:
        macaddr = params[constants.INIC_MAC]
        if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
          macaddr = utils.NormalizeAndValidateMac(macaddr)

        if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
                                     " modifying an existing NIC",
                                     errors.ECODE_INVAL)

  def CheckArguments(self):
    if not (self.op.nics or self.op.disks or self.op.disk_template or
            self.op.hvparams or self.op.beparams or self.op.os_name or
            self.op.offline is not None or self.op.runtime_mem or
            self.op.pnode):
      raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)

    if self.op.hvparams:
      CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
                           "hypervisor", "instance", "cluster")

    self.op.disks = self._UpgradeDiskNicMods(
      "disk", self.op.disks, opcodes.OpInstanceSetParams.TestDiskModifications)
    self.op.nics = self._UpgradeDiskNicMods(
      "NIC", self.op.nics, opcodes.OpInstanceSetParams.TestNicModifications)

    if self.op.disks and self.op.disk_template is not None:
      raise errors.OpPrereqError("Disk template conversion and other disk"
                                 " changes not supported at the same time",
                                 errors.ECODE_INVAL)

    if (self.op.disk_template and
        self.op.disk_template in constants.DTS_INT_MIRROR and
        self.op.remote_node is None):
      raise errors.OpPrereqError("Changing the disk template to a mirrored"
                                 " one requires specifying a secondary node",
                                 errors.ECODE_INVAL)

    # Check NIC modifications
    self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
                    self._VerifyNicModification)

    if self.op.pnode:
      (self.op.pnode_uuid, self.op.pnode) = \
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODEGROUP] = []
    # Can't even acquire node locks in shared mode as upcoming changes in
    # Ganeti 2.6 will start to modify the node object on disk conversion
    self.needed_locks[locking.LEVEL_NODE] = []
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
    # Lock the node group to look up the ipolicy
    self.share_locks[locking.LEVEL_NODEGROUP] = 1

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
      # Acquire locks for the instance's nodegroups optimistically. Needs
      # to be verified in CheckPrereq
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
        self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
    elif level == locking.LEVEL_NODE:
      self._LockInstancesNodes()
      if self.op.disk_template and self.op.remote_node:
        (self.op.remote_node_uuid, self.op.remote_node) = \
          ExpandNodeUuidAndName(self.cfg, self.op.remote_node_uuid,
                                self.op.remote_node)
        self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node_uuid)
    elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, primary and secondaries.

    """
    args = {}
    if constants.BE_MINMEM in self.be_new:
      args["minmem"] = self.be_new[constants.BE_MINMEM]
    if constants.BE_MAXMEM in self.be_new:
      args["maxmem"] = self.be_new[constants.BE_MAXMEM]
    if constants.BE_VCPUS in self.be_new:
      args["vcpus"] = self.be_new[constants.BE_VCPUS]
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
    # information at all.

    if self._new_nics is not None:
      nics = []

      for nic in self._new_nics:
        n = copy.deepcopy(nic)
        nicparams = self.cluster.SimpleFillNIC(n.nicparams)
        n.nicparams = nicparams
        nics.append(NICToTuple(self, n))

      args["nics"] = nics

    env = BuildInstanceHookEnvByObject(self, self.instance, override=args)
    if self.op.disk_template:
      env["NEW_DISK_TEMPLATE"] = self.op.disk_template
    if self.op.runtime_mem:
      env["RUNTIME_MEMORY"] = self.op.runtime_mem

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)
2440

    
2441
  def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
2442
                              old_params, cluster, pnode_uuid):
2443

    
2444
    update_params_dict = dict([(key, params[key])
2445
                               for key in constants.NICS_PARAMETERS
2446
                               if key in params])
2447

    
2448
    req_link = update_params_dict.get(constants.NIC_LINK, None)
2449
    req_mode = update_params_dict.get(constants.NIC_MODE, None)
2450

    
2451
    new_net_uuid = None
2452
    new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid)
2453
    if new_net_uuid_or_name:
2454
      new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name)
2455
      new_net_obj = self.cfg.GetNetwork(new_net_uuid)
2456

    
2457
    if old_net_uuid:
2458
      old_net_obj = self.cfg.GetNetwork(old_net_uuid)
2459

    
2460
    if new_net_uuid:
2461
      netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode_uuid)
2462
      if not netparams:
2463
        raise errors.OpPrereqError("No netparams found for the network"
2464
                                   " %s, probably not connected" %
2465
                                   new_net_obj.name, errors.ECODE_INVAL)
2466
      new_params = dict(netparams)
2467
    else:
2468
      new_params = GetUpdatedParams(old_params, update_params_dict)
2469

    
2470
    utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)
2471

    
2472
    new_filled_params = cluster.SimpleFillNIC(new_params)
2473
    objects.NIC.CheckParameterSyntax(new_filled_params)
2474

    
2475
    new_mode = new_filled_params[constants.NIC_MODE]
2476
    if new_mode == constants.NIC_MODE_BRIDGED:
2477
      bridge = new_filled_params[constants.NIC_LINK]
2478
      msg = self.rpc.call_bridges_exist(pnode_uuid, [bridge]).fail_msg
2479
      if msg:
2480
        msg = "Error checking bridges on node '%s': %s" % \
2481
                (self.cfg.GetNodeName(pnode_uuid), msg)
2482
        if self.op.force:
2483
          self.warn.append(msg)
2484
        else:
2485
          raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
2486

    
2487
    elif new_mode == constants.NIC_MODE_ROUTED:
2488
      ip = params.get(constants.INIC_IP, old_ip)
2489
      if ip is None:
2490
        raise errors.OpPrereqError("Cannot set the NIC IP address to None"
2491
                                   " on a routed NIC", errors.ECODE_INVAL)
2492

    
2493
    elif new_mode == constants.NIC_MODE_OVS:
2494
      # TODO: check OVS link
2495
      self.LogInfo("OVS links are currently not checked for correctness")
2496

    
2497
    if constants.INIC_MAC in params:
2498
      mac = params[constants.INIC_MAC]
2499
      if mac is None:
2500
        raise errors.OpPrereqError("Cannot unset the NIC MAC address",
2501
                                   errors.ECODE_INVAL)
2502
      elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2503
        # otherwise generate the MAC address
2504
        params[constants.INIC_MAC] = \
2505
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
2506
      else:
2507
        # or validate/reserve the current one
2508
        try:
2509
          self.cfg.ReserveMAC(mac, self.proc.GetECId())
2510
        except errors.ReservationError:
2511
          raise errors.OpPrereqError("MAC address '%s' already in use"
2512
                                     " in cluster" % mac,
2513
                                     errors.ECODE_NOTUNIQUE)
2514
    elif new_net_uuid != old_net_uuid:

      def get_net_prefix(net_uuid):
        mac_prefix = None
        if net_uuid:
          nobj = self.cfg.GetNetwork(net_uuid)
          mac_prefix = nobj.mac_prefix

        return mac_prefix

      new_prefix = get_net_prefix(new_net_uuid)
      old_prefix = get_net_prefix(old_net_uuid)
      if old_prefix != new_prefix:
        params[constants.INIC_MAC] = \
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())

    # if there is a change in (ip, network) tuple
    new_ip = params.get(constants.INIC_IP, old_ip)
    if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
      if new_ip:
        # if IP is pool then require a network and generate one IP
        if new_ip.lower() == constants.NIC_IP_POOL:
          if new_net_uuid:
            try:
              new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("Unable to get a free IP"
                                         " from the address pool",
                                         errors.ECODE_STATE)
            self.LogInfo("Chose IP %s from network %s",
                         new_ip,
                         new_net_obj.name)
            params[constants.INIC_IP] = new_ip
          else:
            raise errors.OpPrereqError("ip=pool, but no network found",
                                       errors.ECODE_INVAL)
        # Reserve new IP if in the new network if any
        elif new_net_uuid:
          try:
            self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId())
            self.LogInfo("Reserving IP %s in network %s",
                         new_ip, new_net_obj.name)
          except errors.ReservationError:
            raise errors.OpPrereqError("IP %s not available in network %s" %
                                       (new_ip, new_net_obj.name),
                                       errors.ECODE_NOTUNIQUE)
        # new network is None so check if new IP is a conflicting IP
        elif self.op.conflicts_check:
          _CheckForConflictingIp(self, new_ip, pnode_uuid)

      # release old IP if old network is not None
      if old_ip and old_net_uuid:
        try:
          self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId())
        except errors.AddressPoolError:
          logging.warning("Release IP %s not contained in network %s",
                          old_ip, old_net_obj.name)

    # there are no changes in (ip, network) tuple and old network is not None
    elif (old_net_uuid is not None and
          (req_link is not None or req_mode is not None)):
      raise errors.OpPrereqError("Not allowed to change link or mode of"
                                 " a NIC that is connected to a network",
                                 errors.ECODE_INVAL)

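    # Store the computed parameter sets on the private container; they are
    # consumed later when the NIC objects are created or modified (see
    # _CreateNewNic and _ApplyNicMods).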
    private.params = new_params
    private.filled = new_filled_params

  def _PreCheckDiskTemplate(self, pnode_info):
    """CheckPrereq checks related to a new disk template."""
    # Arguments are passed to avoid configuration lookups
    pnode_uuid = self.instance.primary_node
    if self.instance.disk_template == self.op.disk_template:
      raise errors.OpPrereqError("Instance already has disk template %s" %
                                 self.instance.disk_template,
                                 errors.ECODE_INVAL)

    if (self.instance.disk_template,
        self.op.disk_template) not in self._DISK_CONVERSIONS:
      raise errors.OpPrereqError("Unsupported disk template conversion from"
                                 " %s to %s" % (self.instance.disk_template,
                                                self.op.disk_template),
                                 errors.ECODE_INVAL)
    CheckInstanceState(self, self.instance, INSTANCE_DOWN,
                       msg="cannot change disk template")
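    # Conversions to an internally mirrored template (e.g. DRBD) need a
    # valid secondary node with enough free space in the relevant volume
    # groups.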
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.remote_node_uuid == pnode_uuid:
        raise errors.OpPrereqError("Given new secondary node %s is the same"
                                   " as the primary node of the instance" %
                                   self.op.remote_node, errors.ECODE_STATE)
      CheckNodeOnline(self, self.op.remote_node_uuid)
      CheckNodeNotDrained(self, self.op.remote_node_uuid)
      # FIXME: here we assume that the old instance type is DT_PLAIN
      assert self.instance.disk_template == constants.DT_PLAIN
      disks = [{constants.IDISK_SIZE: d.size,
                constants.IDISK_VG: d.logical_id[0]}
               for d in self.instance.disks]
      required = ComputeDiskSizePerVG(self.op.disk_template, disks)
      CheckNodesFreeDiskPerVG(self, [self.op.remote_node_uuid], required)

      snode_info = self.cfg.GetNodeInfo(self.op.remote_node_uuid)
      snode_group = self.cfg.GetNodeGroup(snode_info.group)
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
                                                              snode_group)
      CheckTargetNodeIPolicy(self, ipolicy, self.instance, snode_info, self.cfg,
                             ignore=self.op.ignore_ipolicy)
      if pnode_info.group != snode_info.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    if not self.op.disk_template in constants.DTS_EXCL_STORAGE:
      # Make sure none of the nodes require exclusive storage
      nodes = [pnode_info]
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        assert snode_info
        nodes.append(snode_info)
      has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
      if compat.any(map(has_es, nodes)):
        errmsg = ("Cannot convert disk template from %s to %s when exclusive"
                  " storage is enabled" % (self.instance.disk_template,
                                           self.op.disk_template))
        raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)

  def _PreCheckDisks(self, ispec):
    """CheckPrereq checks related to disk changes.

    @type ispec: dict
    @param ispec: instance specs to be updated with the new disks

    """
    self.diskparams = self.cfg.GetInstanceDiskParams(self.instance)

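    # Whether any of the instance's nodes uses exclusive storage influences
    # how the requested disk modifications are verified below.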
    excl_stor = compat.any(
      rpc.GetExclusiveStorageForNodes(self.cfg,
                                      self.instance.all_nodes).values()
      )

    # Check disk modifications. This is done here and not in CheckArguments
    # (as with NICs), because we need to know the instance's disk template
    ver_fn = lambda op, par: self._VerifyDiskModification(op, par, excl_stor)
    if self.instance.disk_template == constants.DT_EXT:
      self._CheckMods("disk", self.op.disks, {}, ver_fn)
    else:
      self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
                      ver_fn)

    self.diskmod = _PrepareContainerMods(self.op.disks, None)

    # Check the validity of the `provider' parameter
    if self.instance.disk_template in constants.DT_EXT:
      for mod in self.diskmod:
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
        if mod[0] == constants.DDM_ADD:
          if ext_provider is None:
            raise errors.OpPrereqError("Instance template is '%s' and parameter"
                                       " '%s' missing, during disk add" %
                                       (constants.DT_EXT,
                                        constants.IDISK_PROVIDER),
                                       errors.ECODE_NOENT)
        elif mod[0] == constants.DDM_MODIFY:
          if ext_provider:
            raise errors.OpPrereqError("Parameter '%s' is invalid during disk"
                                       " modification" %
                                       constants.IDISK_PROVIDER,
                                       errors.ECODE_INVAL)
    else:
      for mod in self.diskmod:
        ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
        if ext_provider is not None:
          raise errors.OpPrereqError("Parameter '%s' is only valid for"
                                     " instances of type '%s'" %
                                     (constants.IDISK_PROVIDER,
                                      constants.DT_EXT),
                                     errors.ECODE_INVAL)

    if self.op.disks and self.instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Disk operations not supported for"
                                 " diskless instances", errors.ECODE_INVAL)

    def _PrepareDiskMod(_, disk, params, __):
      disk.name = params.get(constants.IDISK_NAME, None)

    # Verify disk changes (operating on a copy)
    disks = copy.deepcopy(self.instance.disks)
    _ApplyContainerMods("disk", disks, None, self.diskmod, None,
                        _PrepareDiskMod, None)
    utils.ValidateDeviceNames("disk", disks)
    if len(disks) > constants.MAX_DISKS:
      raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
                                 " more" % constants.MAX_DISKS,
                                 errors.ECODE_STATE)
    disk_sizes = [disk.size for disk in self.instance.disks]
    disk_sizes.extend(params["size"] for (op, idx, params, private) in
                      self.diskmod if op == constants.DDM_ADD)
    ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
    ispec[constants.ISPEC_DISK_SIZE] = disk_sizes

    if self.op.offline is not None and self.op.offline:
      CheckInstanceState(self, self.instance, CAN_CHANGE_INSTANCE_OFFLINE,
                         msg="can't change to offline")

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the instance list against the existing names.

    """
    assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    self.cluster = self.cfg.GetClusterInfo()

    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    pnode_uuid = self.instance.primary_node

    self.warn = []

    if (self.op.pnode_uuid is not None and self.op.pnode_uuid != pnode_uuid and
        not self.op.force):
      # verify that the instance is not up
      instance_info = self.rpc.call_instance_info(
          pnode_uuid, self.instance.name, self.instance.hypervisor,
          self.instance.hvparams)
      if instance_info.fail_msg:
        self.warn.append("Can't get instance runtime information: %s" %
                         instance_info.fail_msg)
      elif instance_info.payload:
        raise errors.OpPrereqError("Instance is still running on %s" %
                                   self.cfg.GetNodeName(pnode_uuid),
                                   errors.ECODE_STATE)

    assert pnode_uuid in self.owned_locks(locking.LEVEL_NODE)
    node_uuids = list(self.instance.all_nodes)
    pnode_info = self.cfg.GetNodeInfo(pnode_uuid)

    #_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
    assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
    group_info = self.cfg.GetNodeGroup(pnode_info.group)

    # dictionary with instance information after the modification
    ispec = {}

    # Prepare NIC modifications
    self.nicmod = _PrepareContainerMods(self.op.nics, _InstNicModPrivate)

    # OS change
    if self.op.os_name and not self.op.force:
      CheckNodeHasOS(self, self.instance.primary_node, self.op.os_name,
                     self.op.force_variant)
      instance_os = self.op.os_name
    else:
      instance_os = self.instance.os

    assert not (self.op.disk_template and self.op.disks), \
      "Can't modify disk template and apply disk changes at the same time"

    if self.op.disk_template:
      self._PreCheckDiskTemplate(pnode_info)

    self._PreCheckDisks(ispec)

    # hvparams processing
    if self.op.hvparams:
      hv_type = self.instance.hypervisor
      i_hvdict = GetUpdatedParams(self.instance.hvparams, self.op.hvparams)
      utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
      hv_new = self.cluster.SimpleFillHV(hv_type, self.instance.os, i_hvdict)

      # local check
      hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
      CheckHVParams(self, node_uuids, self.instance.hypervisor, hv_new)
      self.hv_proposed = self.hv_new = hv_new # the new actual values
      self.hv_inst = i_hvdict # the new dict (without defaults)
    else:
      self.hv_proposed = self.cluster.SimpleFillHV(self.instance.hypervisor,
                                                   self.instance.os,
                                                   self.instance.hvparams)
      self.hv_new = self.hv_inst = {}

    # beparams processing
    if self.op.beparams:
      i_bedict = GetUpdatedParams(self.instance.beparams, self.op.beparams,
                                  use_none=True)
      objects.UpgradeBeParams(i_bedict)
      utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
      be_new = self.cluster.SimpleFillBE(i_bedict)
      self.be_proposed = self.be_new = be_new # the new actual values
      self.be_inst = i_bedict # the new dict (without defaults)
    else:
      self.be_new = self.be_inst = {}
      self.be_proposed = self.cluster.SimpleFillBE(self.instance.beparams)
    be_old = self.cluster.FillBE(self.instance)

    # CPU param validation -- checking every time a parameter is
    # changed to cover all cases where either CPU mask or vcpus have
    # changed
    if (constants.BE_VCPUS in self.be_proposed and
        constants.HV_CPU_MASK in self.hv_proposed):
      cpu_list = \
        utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
      # Verify mask is consistent with number of vCPUs. Can skip this
      # test if only 1 entry in the CPU mask, which means same mask
      # is applied to all vCPUs.
      if (len(cpu_list) > 1 and
          len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
        raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
                                   " CPU mask [%s]" %
                                   (self.be_proposed[constants.BE_VCPUS],
                                    self.hv_proposed[constants.HV_CPU_MASK]),
                                   errors.ECODE_INVAL)

      # Only perform this test if a new CPU mask is given
      if constants.HV_CPU_MASK in self.hv_new:
        # Calculate the largest CPU number requested
        max_requested_cpu = max(map(max, cpu_list))
        # Check that all of the instance's nodes have enough physical CPUs to
        # satisfy the requested CPU mask
        hvspecs = [(self.instance.hypervisor,
                    self.cfg.GetClusterInfo()
                      .hvparams[self.instance.hypervisor])]
        _CheckNodesPhysicalCPUs(self, self.instance.all_nodes,
                                max_requested_cpu + 1,
                                hvspecs)

    # osparams processing
    if self.op.osparams:
      i_osdict = GetUpdatedParams(self.instance.osparams, self.op.osparams)
      CheckOSParams(self, True, node_uuids, instance_os, i_osdict)
      self.os_inst = i_osdict # the new dict (without defaults)
    else:
      self.os_inst = {}

    #TODO(dynmem): do the appropriate check involving MINMEM
    if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
        be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
      mem_check_list = [pnode_uuid]
      if be_new[constants.BE_AUTO_BALANCE]:
        # either we changed auto_balance to yes or it was from before
        mem_check_list.extend(self.instance.secondary_nodes)
      instance_info = self.rpc.call_instance_info(
          pnode_uuid, self.instance.name, self.instance.hypervisor,
          self.instance.hvparams)
      hvspecs = [(self.instance.hypervisor,
                  self.cluster.hvparams[self.instance.hypervisor])]
      nodeinfo = self.rpc.call_node_info(mem_check_list, None,
                                         hvspecs, False)
      pninfo = nodeinfo[pnode_uuid]
      msg = pninfo.fail_msg
      if msg:
        # Assume the primary node is unreachable and go ahead
        self.warn.append("Can't get info from primary node %s: %s" %
                         (self.cfg.GetNodeName(pnode_uuid), msg))
      else:
        (_, _, (pnhvinfo, )) = pninfo.payload
        if not isinstance(pnhvinfo.get("memory_free", None), int):
          self.warn.append("Node data from primary node %s doesn't contain"
                           " free memory information" %
                           self.cfg.GetNodeName(pnode_uuid))
        elif instance_info.fail_msg:
          self.warn.append("Can't get instance runtime information: %s" %
                           instance_info.fail_msg)
        else:
          if instance_info.payload:
            current_mem = int(instance_info.payload["memory"])
          else:
            # Assume instance not running
            # (there is a slight race condition here, but it's not very
            # probable, and we have no other way to check)
            # TODO: Describe race condition
            current_mem = 0
          #TODO(dynmem): do the appropriate check involving MINMEM
          miss_mem = (be_new[constants.BE_MAXMEM] - current_mem -
                      pnhvinfo["memory_free"])
          if miss_mem > 0:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from starting, due to %d MB of memory"
                                       " missing on its primary node" %
                                       miss_mem, errors.ECODE_NORES)

      if be_new[constants.BE_AUTO_BALANCE]:
        for node_uuid, nres in nodeinfo.items():
          if node_uuid not in self.instance.secondary_nodes:
            continue
          nres.Raise("Can't get info from secondary node %s" %
                     self.cfg.GetNodeName(node_uuid), prereq=True,
                     ecode=errors.ECODE_STATE)
          (_, _, (nhvinfo, )) = nres.payload
          if not isinstance(nhvinfo.get("memory_free", None), int):
            raise errors.OpPrereqError("Secondary node %s didn't return free"
                                       " memory information" %
                                       self.cfg.GetNodeName(node_uuid),
                                       errors.ECODE_STATE)
          #TODO(dynmem): do the appropriate check involving MINMEM
          elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from failover to its secondary node"
                                       " %s, due to not enough memory" %
                                       self.cfg.GetNodeName(node_uuid),
                                       errors.ECODE_STATE)

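    # Runtime memory (ballooning) changes require a running instance and,
    # unless forced, must stay within the configured minimum/maximum memory
    # of the instance.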
    if self.op.runtime_mem:
      remote_info = self.rpc.call_instance_info(
         self.instance.primary_node, self.instance.name,
         self.instance.hypervisor,
         self.cluster.hvparams[self.instance.hypervisor])
      remote_info.Raise("Error checking node %s" %
                        self.cfg.GetNodeName(self.instance.primary_node))
      if not remote_info.payload: # not running already
        raise errors.OpPrereqError("Instance %s is not running" %
                                   self.instance.name, errors.ECODE_STATE)

      current_memory = remote_info.payload["memory"]
      if (not self.op.force and
           (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
            self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
        raise errors.OpPrereqError("Instance %s must have memory between %d"
                                   " and %d MB of memory unless --force is"
                                   " given" %
                                   (self.instance.name,
                                    self.be_proposed[constants.BE_MINMEM],
                                    self.be_proposed[constants.BE_MAXMEM]),
                                   errors.ECODE_INVAL)

      delta = self.op.runtime_mem - current_memory
      if delta > 0:
        CheckNodeFreeMemory(
            self, self.instance.primary_node,
            "ballooning memory for instance %s" % self.instance.name, delta,
            self.instance.hypervisor,
            self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])

    # make self.cluster visible in the functions below
    cluster = self.cluster

    def _PrepareNicCreate(_, params, private):
      self._PrepareNicModification(params, private, None, None,
                                   {}, cluster, pnode_uuid)
      return (None, None)

    def _PrepareNicMod(_, nic, params, private):
      self._PrepareNicModification(params, private, nic.ip, nic.network,
                                   nic.nicparams, cluster, pnode_uuid)
      return None

    def _PrepareNicRemove(_, params, __):
      ip = params.ip
      net = params.network
      if net is not None and ip is not None:
        self.cfg.ReleaseIp(net, ip, self.proc.GetECId())

    # Verify NIC changes (operating on copy)
    nics = self.instance.nics[:]
    _ApplyContainerMods("NIC", nics, None, self.nicmod,
                        _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
    if len(nics) > constants.MAX_NICS:
      raise errors.OpPrereqError("Instance has too many network interfaces"
                                 " (%d), cannot add more" % constants.MAX_NICS,
                                 errors.ECODE_STATE)

    # Pre-compute NIC changes (necessary to use result in hooks)
    self._nic_chgdesc = []
    if self.nicmod:
      # Operate on copies as this is still in prereq
      nics = [nic.Copy() for nic in self.instance.nics]
      _ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
                          self._CreateNewNic, self._ApplyNicMods, None)
      # Verify that NIC names are unique and valid
      utils.ValidateDeviceNames("NIC", nics)
      self._new_nics = nics
      ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
    else:
      self._new_nics = None
      ispec[constants.ISPEC_NIC_COUNT] = len(self.instance.nics)

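    # Check the resulting instance specs against the node group's instance
    # policy, using both the minimum and maximum memory variants.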
    if not self.op.ignore_ipolicy:
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
                                                              group_info)

      # Fill ispec with backend parameters
      ispec[constants.ISPEC_SPINDLE_USE] = \
        self.be_new.get(constants.BE_SPINDLE_USE, None)
      ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS,
                                                         None)

      # Copy ispec to verify parameters with min/max values separately
      if self.op.disk_template:
        new_disk_template = self.op.disk_template
      else:
        new_disk_template = self.instance.disk_template
      ispec_max = ispec.copy()
      ispec_max[constants.ISPEC_MEM_SIZE] = \
        self.be_new.get(constants.BE_MAXMEM, None)
      res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,
                                                     new_disk_template)
      ispec_min = ispec.copy()
      ispec_min[constants.ISPEC_MEM_SIZE] = \
        self.be_new.get(constants.BE_MINMEM, None)
      res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,
                                                     new_disk_template)

      if (res_max or res_min):
        # FIXME: Improve error message by including information about whether
        # the upper or lower limit of the parameter fails the ipolicy.
        msg = ("Instance allocation to group %s (%s) violates policy: %s" %
               (group_info, group_info.name,
                utils.CommaJoin(set(res_max + res_min))))
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

  def _ConvertPlainToDrbd(self, feedback_fn):
    """Converts an instance from plain to drbd.

    """
    feedback_fn("Converting template to drbd")
    pnode_uuid = self.instance.primary_node
    snode_uuid = self.op.remote_node_uuid

    assert self.instance.disk_template == constants.DT_PLAIN

    # create a fake disk info for _GenerateDiskTemplate
    disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
                  constants.IDISK_VG: d.logical_id[0],
                  constants.IDISK_NAME: d.name}
                 for d in self.instance.disks]
    new_disks = GenerateDiskTemplate(self, self.op.disk_template,
                                     self.instance.uuid, pnode_uuid,
                                     [snode_uuid], disk_info, None, None, 0,
                                     feedback_fn, self.diskparams)
    anno_disks = rpc.AnnotateDiskParams(constants.DT_DRBD8, new_disks,
                                        self.diskparams)
    p_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, pnode_uuid)
    s_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, snode_uuid)
    info = GetInstanceInfoText(self.instance)
    feedback_fn("Creating additional volumes...")
    # first, create the missing data and meta devices
    for disk in anno_disks:
      # unfortunately this is... not too nice
      CreateSingleBlockDev(self, pnode_uuid, self.instance, disk.children[1],
                           info, True, p_excl_stor)
      for child in disk.children:
        CreateSingleBlockDev(self, snode_uuid, self.instance, child, info, True,
                             s_excl_stor)
    # at this stage, all new LVs have been created, we can rename the
    # old ones
    feedback_fn("Renaming original volumes...")
    rename_list = [(o, n.children[0].logical_id)
                   for (o, n) in zip(self.instance.disks, new_disks)]
    result = self.rpc.call_blockdev_rename(pnode_uuid, rename_list)
    result.Raise("Failed to rename original LVs")

    feedback_fn("Initializing DRBD devices...")
    # all child devices are in place, we can now create the DRBD devices
    try:
      for disk in anno_disks:
        for (node_uuid, excl_stor) in [(pnode_uuid, p_excl_stor),
                                       (snode_uuid, s_excl_stor)]:
          f_create = node_uuid == pnode_uuid
          CreateSingleBlockDev(self, node_uuid, self.instance, disk, info,
                               f_create, excl_stor)
    except errors.GenericError, e:
      feedback_fn("Initializing of DRBD devices failed;"
                  " renaming back original volumes...")
      for disk in new_disks:
        self.cfg.SetDiskID(disk, pnode_uuid)
      rename_back_list = [(n.children[0], o.logical_id)
                          for (n, o) in zip(new_disks, self.instance.disks)]
      result = self.rpc.call_blockdev_rename(pnode_uuid, rename_back_list)
      result.Raise("Failed to rename LVs back after error %s" % str(e))
      raise

    # at this point, the instance has been modified
    self.instance.disk_template = constants.DT_DRBD8
    self.instance.disks = new_disks
    self.cfg.Update(self.instance, feedback_fn)

    # Release node locks while waiting for sync
    ReleaseLocks(self, locking.LEVEL_NODE)

    # disks are created, waiting for sync
    disk_abort = not WaitForSync(self, self.instance,
                                 oneshot=not self.op.wait_for_sync)
    if disk_abort:
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance, please cleanup manually")

    # Node resource locks will be released by caller

  def _ConvertDrbdToPlain(self, feedback_fn):
    """Converts an instance from drbd to plain.

    """
    assert len(self.instance.secondary_nodes) == 1
    assert self.instance.disk_template == constants.DT_DRBD8

    pnode_uuid = self.instance.primary_node
    snode_uuid = self.instance.secondary_nodes[0]
    feedback_fn("Converting template to plain")

    old_disks = AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
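    # For DRBD8 disks the first child is the data LV and the second one the
    # metadata LV; the data LV becomes the plain disk after the conversion.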
    new_disks = [d.children[0] for d in self.instance.disks]

    # copy over size, mode and name
    for parent, child in zip(old_disks, new_disks):
      child.size = parent.size
      child.mode = parent.mode
      child.name = parent.name

    # this is a DRBD disk, return its port to the pool
    # NOTE: this must be done right before the call to cfg.Update!
    for disk in old_disks:
      tcp_port = disk.logical_id[2]
      self.cfg.AddTcpUdpPort(tcp_port)

    # update instance structure
    self.instance.disks = new_disks
    self.instance.disk_template = constants.DT_PLAIN
    _UpdateIvNames(0, self.instance.disks)
    self.cfg.Update(self.instance, feedback_fn)

    # Release locks in case removing disks takes a while
    ReleaseLocks(self, locking.LEVEL_NODE)

    feedback_fn("Removing volumes on the secondary node...")
    for disk in old_disks:
      self.cfg.SetDiskID(disk, snode_uuid)
      msg = self.rpc.call_blockdev_remove(snode_uuid, disk).fail_msg
      if msg:
        self.LogWarning("Could not remove block device %s on node %s,"
                        " continuing anyway: %s", disk.iv_name,
                        self.cfg.GetNodeName(snode_uuid), msg)

    feedback_fn("Removing unneeded volumes on the primary node...")
    for idx, disk in enumerate(old_disks):
      meta = disk.children[1]
      self.cfg.SetDiskID(meta, pnode_uuid)
      msg = self.rpc.call_blockdev_remove(pnode_uuid, meta).fail_msg
      if msg:
        self.LogWarning("Could not remove metadata for disk %d on node %s,"
                        " continuing anyway: %s", idx,
                        self.cfg.GetNodeName(pnode_uuid), msg)

  def _CreateNewDisk(self, idx, params, _):
    """Creates a new disk.

    """
    # add a new disk
    if self.instance.disk_template in constants.DTS_FILEBASED:
      (file_driver, file_path) = self.instance.disks[0].logical_id
      file_path = os.path.dirname(file_path)
    else:
      file_driver = file_path = None

    disk = \
      GenerateDiskTemplate(self, self.instance.disk_template,
                           self.instance.uuid, self.instance.primary_node,
                           self.instance.secondary_nodes, [params], file_path,
                           file_driver, idx, self.Log, self.diskparams)[0]

    new_disks = CreateDisks(self, self.instance, disks=[disk])

    if self.cluster.prealloc_wipe_disks:
      # Wipe new disk
      WipeOrCleanupDisks(self, self.instance,
                         disks=[(idx, disk, 0)],
                         cleanup=new_disks)

    return (disk, [
      ("disk/%d" % idx, "add:size=%s,mode=%s" % (disk.size, disk.mode)),
      ])

  @staticmethod
  def _ModifyDisk(idx, disk, params, _):
    """Modifies a disk.

    """
    changes = []
    mode = params.get(constants.IDISK_MODE, None)
    if mode:
      disk.mode = mode
      changes.append(("disk.mode/%d" % idx, disk.mode))

    name = params.get(constants.IDISK_NAME, None)
    disk.name = name
    changes.append(("disk.name/%d" % idx, disk.name))

    return changes

  def _RemoveDisk(self, idx, root, _):
    """Removes a disk.

    """
    (anno_disk,) = AnnotateDiskParams(self.instance, [root], self.cfg)
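    # Remove the disk's components on every node they live on; failures are
    # logged as warnings and do not abort the operation.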
    for node_uuid, disk in anno_disk.ComputeNodeTree(
                             self.instance.primary_node):
      self.cfg.SetDiskID(disk, node_uuid)
      msg = self.rpc.call_blockdev_remove(node_uuid, disk).fail_msg
      if msg:
        self.LogWarning("Could not remove disk/%d on node '%s': %s,"
                        " continuing anyway", idx,
                        self.cfg.GetNodeName(node_uuid), msg)

    # if this is a DRBD disk, return its port to the pool
    if root.dev_type in constants.LDS_DRBD:
      self.cfg.AddTcpUdpPort(root.logical_id[2])

  def _CreateNewNic(self, idx, params, private):
    """Creates data structure for a new network interface.

    """
    mac = params[constants.INIC_MAC]
    ip = params.get(constants.INIC_IP, None)
    net = params.get(constants.INIC_NETWORK, None)
    name = params.get(constants.INIC_NAME, None)
    net_uuid = self.cfg.LookupNetwork(net)
    #TODO: not private.filled?? can a nic have no nicparams??
    nicparams = private.filled
    nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name,
                       nicparams=nicparams)
    nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())

    return (nobj, [
      ("nic.%d" % idx,
       "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
       (mac, ip, private.filled[constants.NIC_MODE],
       private.filled[constants.NIC_LINK],
       net)),
      ])

  def _ApplyNicMods(self, idx, nic, params, private):
    """Modifies a network interface.

    """
    changes = []

    for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:
      if key in params:
        changes.append(("nic.%s/%d" % (key, idx), params[key]))
        setattr(nic, key, params[key])

    new_net = params.get(constants.INIC_NETWORK, nic.network)
    new_net_uuid = self.cfg.LookupNetwork(new_net)
    if new_net_uuid != nic.network:
      changes.append(("nic.network/%d" % idx, new_net))
      nic.network = new_net_uuid

    if private.filled:
      nic.nicparams = private.filled

      for (key, val) in nic.nicparams.items():
        changes.append(("nic.%s/%d" % (key, idx), val))

    return changes

  def Exec(self, feedback_fn):
    """Modifies an instance.

    All parameters take effect only at the next restart of the instance.

    """
    # Process here the warnings from CheckPrereq, as we don't have a
    # feedback_fn there.
    # TODO: Replace with self.LogWarning
    for warn in self.warn:
      feedback_fn("WARNING: %s" % warn)

    assert ((self.op.disk_template is None) ^
            bool(self.owned_locks(locking.LEVEL_NODE_RES))), \
      "Not owning any node resource locks"

    result = []

    # New primary node
    if self.op.pnode_uuid:
      self.instance.primary_node = self.op.pnode_uuid

    # runtime memory
    if self.op.runtime_mem:
      rpcres = self.rpc.call_instance_balloon_memory(self.instance.primary_node,
                                                     self.instance,
                                                     self.op.runtime_mem)
      rpcres.Raise("Cannot modify instance runtime memory")
      result.append(("runtime_memory", self.op.runtime_mem))

    # Apply disk changes
    _ApplyContainerMods("disk", self.instance.disks, result, self.diskmod,
                        self._CreateNewDisk, self._ModifyDisk,
                        self._RemoveDisk)
    _UpdateIvNames(0, self.instance.disks)

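    # Disk template conversion: verify lock ownership (debug builds only),
    # shut down the instance's disks and dispatch to the matching
    # conversion routine.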
    if self.op.disk_template:
      if __debug__:
        check_nodes = set(self.instance.all_nodes)
        if self.op.remote_node_uuid:
          check_nodes.add(self.op.remote_node_uuid)
        for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
          owned = self.owned_locks(level)
          assert not (check_nodes - owned), \
            ("Not owning the correct locks, owning %r, expected at least %r" %
             (owned, check_nodes))

      r_shut = ShutdownInstanceDisks(self, self.instance)
      if not r_shut:
        raise errors.OpExecError("Cannot shutdown instance disks, unable to"
                                 " proceed with disk template conversion")
      mode = (self.instance.disk_template, self.op.disk_template)
      try:
        self._DISK_CONVERSIONS[mode](self, feedback_fn)
      except:
        self.cfg.ReleaseDRBDMinors(self.instance.uuid)
        raise
      result.append(("disk_template", self.op.disk_template))

      assert self.instance.disk_template == self.op.disk_template, \
        ("Expected disk template '%s', found '%s'" %
         (self.op.disk_template, self.instance.disk_template))

    # Release node and resource locks if there are any (they might already have
    # been released during disk conversion)
    ReleaseLocks(self, locking.LEVEL_NODE)
    ReleaseLocks(self, locking.LEVEL_NODE_RES)

    # Apply NIC changes
    if self._new_nics is not None:
      self.instance.nics = self._new_nics
      result.extend(self._nic_chgdesc)

    # hvparams changes
    if self.op.hvparams:
      self.instance.hvparams = self.hv_inst
      for key, val in self.op.hvparams.iteritems():
        result.append(("hv/%s" % key, val))

    # beparams changes
    if self.op.beparams:
      self.instance.beparams = self.be_inst
      for key, val in self.op.beparams.iteritems():
        result.append(("be/%s" % key, val))

    # OS change
    if self.op.os_name:
      self.instance.os = self.op.os_name

    # osparams changes
    if self.op.osparams:
      self.instance.osparams = self.os_inst
      for key, val in self.op.osparams.iteritems():
        result.append(("os/%s" % key, val))

    if self.op.offline is None:
      # Ignore
      pass
    elif self.op.offline:
      # Mark instance as offline
      self.cfg.MarkInstanceOffline(self.instance.uuid)
      result.append(("admin_state", constants.ADMINST_OFFLINE))
    else:
      # Mark instance as online, but stopped
      self.cfg.MarkInstanceDown(self.instance.uuid)
      result.append(("admin_state", constants.ADMINST_DOWN))

    self.cfg.Update(self.instance, feedback_fn, self.proc.GetECId())

    assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
                self.owned_locks(locking.LEVEL_NODE)), \
      "All node locks should have been released by now"

    return result

  _DISK_CONVERSIONS = {
    (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
    (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
    }


class LUInstanceChangeGroup(LogicalUnit):
  HPATH = "instance-change-group"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self.share_locks = ShareAll()

    self.needed_locks = {
      locking.LEVEL_NODEGROUP: [],
      locking.LEVEL_NODE: [],
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
      }

    self._ExpandAndLockInstance()

    if self.op.target_groups:
      self.req_target_uuids = map(self.cfg.LookupNodeGroup,
                                  self.op.target_groups)
    else:
      self.req_target_uuids = None

    self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]

      if self.req_target_uuids:
        lock_groups = set(self.req_target_uuids)

        # Lock all groups used by instance optimistically; this requires going
        # via the node before it's locked, requiring verification later on
        instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
        lock_groups.update(instance_groups)
      else:
        # No target groups, need to lock all of them
        lock_groups = locking.ALL_SET

      self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups

    elif level == locking.LEVEL_NODE:
      if self.req_target_uuids:
        # Lock all nodes used by instances
        self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
        self._LockInstancesNodes()

        # Lock all nodes in all potential target groups
        lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
                       self.cfg.GetInstanceNodeGroups(self.op.instance_uuid))
        member_nodes = [node_uuid
                        for group in lock_groups
                        for node_uuid in self.cfg.GetNodeGroup(group).members]
        self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
      else:
        # Lock all nodes as all groups are potential targets
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    owned_instance_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))

    assert (self.req_target_uuids is None or
            owned_groups.issuperset(self.req_target_uuids))
    assert owned_instance_names == set([self.op.instance_name])

    # Get instance information
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)

    # Check if node groups for locked instance are still correct
    assert owned_nodes.issuperset(self.instance.all_nodes), \
      ("Instance %s's nodes changed while we kept the lock" %
       self.op.instance_name)

    inst_groups = CheckInstanceNodeGroups(self.cfg, self.op.instance_uuid,
                                          owned_groups)

    if self.req_target_uuids:
      # User requested specific target groups
      self.target_uuids = frozenset(self.req_target_uuids)
    else:
      # All groups except those used by the instance are potential targets
      self.target_uuids = owned_groups - inst_groups

    conflicting_groups = self.target_uuids & inst_groups
    if conflicting_groups:
      raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
                                 " used by the instance '%s'" %
                                 (utils.CommaJoin(conflicting_groups),
                                  self.op.instance_name),
                                 errors.ECODE_INVAL)

    if not self.target_uuids:
      raise errors.OpPrereqError("There are no possible target groups",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    assert self.target_uuids

    env = {
      "TARGET_GROUPS": " ".join(self.target_uuids),
      }

    env.update(BuildInstanceHookEnvByObject(self, self.instance))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()
    return ([mn], [mn])

  def Exec(self, feedback_fn):
    instances = list(self.owned_locks(locking.LEVEL_INSTANCE))

    assert instances == [self.op.instance_name], "Instance not locked"

    req = iallocator.IAReqGroupChange(instances=instances,
                                      target_groups=list(self.target_uuids))
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute solution for changing group of"
                                 " instance '%s' using iallocator '%s': %s" %
                                 (self.op.instance_name, self.op.iallocator,
                                  ial.info), errors.ECODE_NORES)

    jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False)

    self.LogInfo("Iallocator returned %s job(s) for changing group of"
                 " instance '%s'", len(jobs), self.op.instance_name)

    return ResultWithJobs(jobs)